diff --git a/sdcclient/__init__.py b/sdcclient/__init__.py
index ed95c314..2c176ac9 100644
--- a/sdcclient/__init__.py
+++ b/sdcclient/__init__.py
@@ -1,3 +1,4 @@
-from sdcclient._client import SdcClient
-from sdcclient._client import SdMonitorClient
-from sdcclient._client import SdSecureClient
+from sdcclient._monitor import SdcClient
+from sdcclient._monitor import SdMonitorClient
+from sdcclient._secure import SdSecureClient
+from sdcclient._scanning import SdScanningClient
diff --git a/sdcclient/_client.py b/sdcclient/_client.py
deleted file mode 100644
index 87cc7dae..00000000
--- a/sdcclient/_client.py
+++ /dev/null
@@ -1,2452 +0,0 @@
-import os
-import json
-import requests
-import copy
-import datetime
-import shutil
-
-class _SdcCommon(object):
-    '''Interact with the Sysdig Monitor/Secure API.
-
-    **Arguments**
-        - **token**: A Sysdig Monitor/Secure API token from the *Sysdig Cloud API* section of the Settings page for `monitor `_ or .`secure `_.
-        - **sdc_url**: URL for contacting the Sysdig API server. Set this in `On-Premises installs `__.
-        - **ssl_verify**: Whether to verify certificate. Set to False if using a self-signed certificate in an `On-Premises install `__.
-
-    **Returns**
-        An object for further interactions with the Sysdig Monitor/Secure API. See methods below.
-    '''
-    lasterr = None
-
-    def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True):
-        self.token = os.environ.get("SDC_TOKEN", token)
-        self.hdrs = {'Authorization': 'Bearer ' + self.token, 'Content-Type': 'application/json'}
-        self.url = os.environ.get("SDC_URL", sdc_url)
-        self.ssl_verify = os.environ.get("SDC_SSL_VERIFY", None)
-        if self.ssl_verify == None:
-            self.ssl_verify = ssl_verify
-        else:
-            self.ssl_verify = self.ssl_verify.lower() == 'true'
-
-    def _checkResponse(self, res):
-        if res.status_code >= 300:
-            errorcode = res.status_code
-            self.lasterr = None
-
-            try:
-                j = res.json()
-            except:
-                self.lasterr = 'status code ' + str(errorcode)
-                return False
-
-            if 'errors' in j:
-                if 'message' in j['errors'][0]:
-                    self.lasterr = j['errors'][0]['message']
-
-                if 'reason' in j['errors'][0]:
-                    if self.lasterr is not None:
-                        self.lasterr += ' '
-                    else:
-                        self.lasrerr = ''
-
-                    self.lasterr += j['errors'][0]['reason']
-            elif 'message' in j:
-                self.lasterr = j['message']
-            else:
-                self.lasterr = 'status code ' + str(errorcode)
-            return False
-        return True
-
-    def get_user_info(self):
-        '''**Description**
-            Get details about the current user.
-
-        **Success Return Value**
-            A dictionary containing information about the user, for example its email and the maximum number of agents it can install.
-
-        **Example**
-            `examples/print_user_info.py `_
-        '''
-        res = requests.get(self.url + '/api/user/me', headers=self.hdrs, verify=self.ssl_verify)
-        if not self._checkResponse(res):
-            return [False, self.lasterr]
-        return [True, res.json()]
-
-    def get_user_token(self):
-        '''**Description**
-            Return the API token of the current user.
-
-        **Success Return Value**
-            A string containing the user token.
-        '''
-        res = requests.get(self.url + '/api/token', headers=self.hdrs, verify=self.ssl_verify)
-        if not self._checkResponse(res):
-            return [False, self.lasterr]
-        tkinfo = res.json()
-
-        return [True, tkinfo['token']['key']]
-
-    def get_connected_agents(self):
-        '''**Description**
-            Return the agents currently connected to Sysdig Monitor for the current user.
-
-        **Success Return Value**
-            A list of the agents with all their attributes.
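For illustration, a minimal usage sketch of the client documented above. The token is a placeholder (it can also be supplied via the SDC_TOKEN environment variable), and every call returns a [success, payload] pair.

from sdcclient import SdMonitorClient

# Token is a placeholder; SDC_TOKEN in the environment also works.
sdclient = SdMonitorClient(token='<your-api-token>')

# Every call returns a [success, payload] pair.
ok, agents = sdclient.get_connected_agents()
if ok:
    for agent in agents:
        print(agent['hostName'])
else:
    print('error: ' + agents)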
- ''' - res = requests.get(self.url + '/api/agents/connected', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data['agents']] - - def get_n_connected_agents(self): - '''**Description** - Return the number of agents currently connected to Sysdig Monitor for the current user. - - **Success Return Value** - An integer number. - ''' - res = requests.get(self.url + '/api/agents/connected', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data['total']] - - def get_notification_ids(self, channels=None): - '''**Description** - Get an array of all configured Notification Channel IDs, or a filtered subset of them. - - **Arguments** - - **channels**: an optional array of dictionaries to limit the set of Notification Channel IDs returned. If not specified, IDs for all configured Notification Channels are returned. Each dictionary contains a ``type`` field that can be one of the available types of Notification Channel (``EMAIL``, ``SNS``, ``PAGER_DUTY``, ``SLACK``, ``OPSGENIE``, ``VICTOROPS``, ``WEBHOOK``) as well as additional elements specific to each channel type. - - **Success Return Value** - An array of Notification Channel IDs (integers). - - **Examples** - - `examples/create_alert.py `_ - - `examples/restore_alerts.py `_ - ''' - - res = requests.get(self.url + '/api/notificationChannels', headers=self.hdrs, verify=self.ssl_verify) - - if not self._checkResponse(res): - return [False, self.lasterr] - - ids = [] - - # If no array of channel types/names was provided to filter by, - # just return them all. - if channels is None: - for ch in res.json()["notificationChannels"]: - ids.append(ch['id']) - return [True, ids] - - # Return the filtered set of channels based on the provided types/names array. 
- # Should try and improve this M * N lookup - for c in channels: - found = False - for ch in res.json()["notificationChannels"]: - if c['type'] == ch['type']: - if c['type'] == 'SNS': - opt = ch['options'] - if set(opt['snsTopicARNs']) == set(c['snsTopicARNs']): - found = True - ids.append(ch['id']) - elif c['type'] == 'EMAIL': - opt = ch['options'] - if 'emailRecipients' in c: - if set(c['emailRecipients']) == set(opt['emailRecipients']): - found = True - ids.append(ch['id']) - elif 'name' in c: - if c['name'] == ch.get('name'): - found = True - ids.append(ch['id']) - elif c['type'] == 'PAGER_DUTY': - opt = ch['options'] - if opt['account'] == c['account'] and opt['serviceName'] == c['serviceName']: - found = True - ids.append(ch['id']) - elif c['type'] == 'SLACK': - opt = ch['options'] - if 'channel' in opt and opt['channel'] == c['channel']: - found = True - ids.append(ch['id']) - elif c['type'] == 'OPSGENIE': - if 'name' in c: - if c['name'] == ch.get('name'): - found = True - ids.append(ch['id']) - elif c['type'] == 'VICTOROPS': - if 'name' in c: - if c['name'] == ch.get('name'): - found = True - ids.append(ch['id']) - elif c['type'] == 'WEBHOOK': - if 'name' in c: - if c['name'] == ch.get('name'): - found = True - ids.append(ch['id']) - if not found: - return [False, "Channel not found: " + str(c)] - - return [True, ids] - - def create_email_notification_channel(self, channel_name, email_recipients): - channel_json = { - 'notificationChannel' : { - 'type' : 'EMAIL', - 'name' : channel_name, - 'enabled' : True, - 'options' : { - 'emailRecipients' : email_recipients - } - } - } - - res = requests.post(self.url + '/api/notificationChannels', headers=self.hdrs, data=json.dumps(channel_json), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def get_notification_channel(self, id): - - res = requests.get(self.url + '/api/notificationChannels/' + str(id), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()['notificationChannel']] - - def update_notification_channel(self, channel): - if 'id' not in channel: - return [False, "Invalid channel format"] - - res = requests.put(self.url + '/api/notificationChannels/' + str(channel['id']), headers=self.hdrs, data=json.dumps({ "notificationChannel": channel }), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def delete_notification_channel(self, channel): - if 'id' not in channel: - return [False, "Invalid channel format"] - - res = requests.delete(self.url + '/api/notificationChannels/' + str(channel['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, None] - - def get_data_retention_info(self): - '''**Description** - Return the list of data retention intervals, with beginning and end UTC time for each of them. Sysdig Monitor performs rollups of the data it stores. This means that data is stored at different time granularities depending on how far back in time it is. This call can be used to know what precision you can expect before you make a call to :func:`~SdcClient.get_data`. - - **Success Return Value** - A dictionary containing the list of available sampling intervals. 
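A short sketch of the notification-channel calls above: create an email channel, then resolve its ID for later use with alerts. The channel name and recipient address are placeholders.

from sdcclient import SdMonitorClient

sdclient = SdMonitorClient(token='<your-api-token>')

# Create an email channel, then look up its ID.
ok, channel = sdclient.create_email_notification_channel(
    'Ops mailing list', ['ops@example.com'])
if not ok:
    raise SystemExit(channel)

ok, ids = sdclient.get_notification_ids(
    [{'type': 'EMAIL', 'emailRecipients': ['ops@example.com']}])
print(ids if ok else 'error: ' + ids)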
- - **Example** - `examples/print_data_retention_info.py `_ - ''' - res = requests.get(self.url + '/api/history/timelines/', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def get_topology_map(self, grouping_hierarchy, time_window_s, sampling_time_s): - # - # Craft the time interval section - # - tlines = self.get_data_retention_info() - - for tline in tlines[1]['agents']: - if tline['sampling'] == sampling_time_s * 1000000: - timeinfo = tline - - if timeinfo is None: - return [False, "sampling time " + str(sampling_time_s) + " not supported"] - - timeinfo['from'] = timeinfo['to'] - timeinfo['sampling'] - - # - # Create the grouping hierarchy - # - gby = [{'metric': g} for g in grouping_hierarchy] - - # - # Prepare the json - # - req_json = { - 'format': { - 'type': 'map', - 'exportProcess': True - }, - 'time': timeinfo, - #'filter': { - # 'filters': [ - # { - # 'metric': 'agent.tag.Tag', - # 'op': '=', - # 'value': 'production-maintenance', - # 'filters': None - # } - # ], - # 'logic': 'and' - #}, - 'limit': { - 'hostGroups': 20, - 'hosts': 20, - 'containers': 20, - 'processes': 10 - }, - 'group': { - 'configuration': { - 'groups': [ - { - 'filters': [], - 'groupBy': gby - } - ] - } - }, - 'nodeMetrics': [ - { - 'id': 'cpu.used.percent', - 'aggregation': 'timeAvg', - 'groupAggregation': 'avg' - } - ], - 'linkMetrics': [ - { - 'id': 'net.bytes.total', - 'aggregation': 'timeAvg', - 'groupAggregation': 'sum' - } - ] - } - - # - # Fire the request - # - res = requests.post(self.url + '/api/data?format=map', headers=self.hdrs, - data=json.dumps(req_json), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def post_event(self, name, description=None, severity=None, event_filter=None, tags=None): - '''**Description** - Send an event to Sysdig Monitor. The events you post are available in the Events tab in the Sysdig Monitor UI and can be overlied to charts. - - **Arguments** - - **name**: the name of the new event. - - **description**: a longer description offering detailed information about the event. - - **severity**: syslog style from 0 (high) to 7 (low). - - **event_filter**: metadata, in Sysdig Monitor format, of nodes to associate with the event, e.g. ``host.hostName = 'ip-10-1-1-1' and container.name = 'foo'``. - - **tags**: a list of key-value dictionaries that can be used to tag the event. Can be used for filtering/segmenting purposes in Sysdig Monitor. - - **Success Return Value** - A dictionary describing the new event. - - **Examples** - - `examples/post_event_simple.py `_ - - `examples/post_event.py `_ - ''' - edata = { - 'event': { - 'name': name - } - } - - if description is not None: - edata['event']['description'] = description - - if severity is not None: - edata['event']['severity'] = severity - - if event_filter is not None: - edata['event']['filter'] = event_filter - - if tags is not None: - edata['event']['tags'] = tags - - res = requests.post(self.url + '/api/events/', headers=self.hdrs, data=json.dumps(edata), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def get_events(self, name=None, from_ts=None, to_ts=None, tags=None): - '''**Description** - Returns the list of Sysdig Monitor events. - - **Arguments** - - **name**: filter events by name. - - **from_ts**: filter events by start time. Timestamp format is in UTC (seconds). 
- - **to_ts**: filter events by end time. Timestamp format is in UTC (seconds). - - **tags**: filter events by tags. Can be, for example ``tag1 = 'value1'``. - - **Success Return Value** - A dictionary containing the list of events. - - **Example** - `examples/list_events.py `_ - ''' - params = {} - - if name is not None: - params['name'] = name - - if from_ts is not None: - params['from'] = from_ts - - if to_ts is not None: - params['to'] = to_ts - - if tags is not None: - params['tags'] = tags - - res = requests.get(self.url + '/api/events/', headers=self.hdrs, params=params, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def delete_event(self, event): - '''**Description** - Deletes an event. - - **Arguments** - - **event**: the event object as returned by :func:`~SdcClient.get_events`. - - **Success Return Value** - `None`. - - **Example** - `examples/delete_event.py `_ - ''' - if 'id' not in event: - return [False, "Invalid event format"] - - res = requests.delete(self.url + '/api/events/' + str(event['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, None] - - def get_data(self, metrics, start_ts, end_ts=0, sampling_s=0, - filter='', datasource_type='host', paging=None): - '''**Description** - Export metric data (both time-series and table-based). - - **Arguments** - - **metrics**: a list of dictionaries, specifying the metrics and grouping keys that the query will return. A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. These entries are used to apply single or hierarchical segmentation to the returned data and don't require the aggregations section. Refer to the Example link below for ready-to-use code snippets. - - **start_ts**: the UTC time (in seconds) of the beginning of the data window. A negative value can be optionally used to indicate a relative time in the past from now. For example, -3600 means "one hour ago". - - **end_ts**: the UTC time (in seconds) of the end of the data window, or 0 to indicate "now". A negative value can also be optionally used to indicate a relative time in the past from now. For example, -3600 means "one hour ago". - - **sampling_s**: the duration of the samples that will be returned. 0 means that the whole data will be returned as a single sample. - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the query will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **datasource_type**: specify the metric source for the request, can be ``container`` or ``host``. Most metrics, for example ``cpu.used.percent`` or ``memory.bytes.used``, are reported by both hosts and containers. By default, host metrics are used, but if the request contains a container-specific grouping key in the metric list/filter (e.g. ``container.name``), then the container source is used. In cases where grouping keys are missing or apply to both hosts and containers (e.g. 
``tag.Name``), *datasource_type* can be explicitly set to avoid any ambiguity and allow the user to select precisely what kind of data should be used for the request. `examples/get_data_datasource.py `_ contains a few examples that should clarify the use of this argument. - - **paging**: if segmentation of the query generates values for several different entities (e.g. containers/hosts), this parameter specifies which to include in the returned result. It's specified as a dictionary of inclusive values for ``from`` and ``to`` with the default being ``{ "from": 0, "to": 9 }``, which will return values for the "top 10" entities. The meaning of "top" is query-dependent, based on points having been sorted via the specified group aggregation, with the results sorted in ascending order if the group aggregation is ``min`` or ``none``, and descending order otherwise. - - **Success Return Value** - A dictionary with the requested data. Data is organized in a list of time samples, each of which includes a UTC timestamp and a list of values, whose content and order reflect what was specified in the *metrics* argument. - - **Examples** - - `examples/get_data_simple.py `_ - - `examples/get_data_advanced.py `_ - - `examples/list_hosts.py `_ - - `examples/get_data_datasource.py `_ - ''' - reqbody = { - 'metrics': metrics, - 'dataSourceType': datasource_type, - } - - if start_ts < 0: - reqbody['last'] = -start_ts - elif start_ts == 0: - return [False, "start_ts cannot be 0"] - else: - reqbody['start'] = start_ts - reqbody['end'] = end_ts - - if filter != '': - reqbody['filter'] = filter - - if paging is not None: - reqbody['paging'] = paging - - if sampling_s != 0: - reqbody['sampling'] = sampling_s - - res = requests.post(self.url + '/api/data/', headers=self.hdrs, data=json.dumps(reqbody), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def get_sysdig_captures(self): - '''**Description** - Returns the list of sysdig captures for the user. - - **Success Return Value** - A dictionary containing the list of captures. - - **Example** - `examples/list_sysdig_captures.py `_ - ''' - res = requests.get(self.url + '/api/sysdig', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def poll_sysdig_capture(self, capture): - '''**Description** - Fetch the updated state of a sysdig capture. Can be used to poll the status of a capture that has been previously created and started with :func:`~SdcClient.create_sysdig_capture`. - - **Arguments** - - **capture**: the capture object as returned by :func:`~SdcClient.get_sysdig_captures` or :func:`~SdcClient.create_sysdig_capture`. - - **Success Return Value** - A dictionary showing the updated details of the capture. Use the ``status`` field to check the progress of a capture. - - **Example** - `examples/create_sysdig_capture.py `_ - ''' - if 'id' not in capture: - return [False, 'Invalid capture format'] - - res = requests.get(self.url + '/api/sysdig/' + str(capture['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def create_sysdig_capture(self, hostname, capture_name, duration, capture_filter='', folder='/'): - '''**Description** - Create a new sysdig capture. The capture will be immediately started. - - **Arguments** - - **hostname**: the hostname of the instrumented host where the capture will be taken. 
- - **capture_name**: the name of the capture. - - **duration**: the duration of the capture, in seconds. - - **capture_filter**: a sysdig filter expression. - - **folder**: directory in the S3 bucket where the capture will be saved. - - **Success Return Value** - A dictionary showing the details of the new capture. - - **Example** - `examples/create_sysdig_capture.py `_ - ''' - res = self.get_connected_agents() - if not res[0]: - return res - - capture_agent = None - - for agent in res[1]: - if hostname == agent['hostName']: - capture_agent = agent - break - - if capture_agent is None: - return [False, hostname + ' not found'] - - data = { - 'agent': capture_agent, - 'name' : capture_name, - 'duration': duration, - 'folder': folder, - 'filters': capture_filter, - 'bucketName': '' - } - - res = requests.post(self.url + '/api/sysdig', headers=self.hdrs, data=json.dumps(data), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def create_user_invite(self, user_email, first_name=None, last_name=None, system_role=None): - '''**Description** - Invites a new user to use Sysdig Monitor. This should result in an email notification to the specified address. - - **Arguments** - - **user_email**: the email address of the user that will be invited to use Sysdig Monitor - - **first_name**: the first name of the user being invited - - **last_name**: the last name of the user being invited - - **system_role**: system-wide privilege level for this user regardless of team. specify 'ROLE_CUSTOMER' to create an Admin. if not specified, default is a non-Admin ('ROLE_USER'). - - **Success Return Value** - The newly created user. - - **Examples** - - `examples/user_team_mgmt.py `_ - - `examples/user_team_mgmt_extended.py `_ - - ''' - # Look up the list of users to see if this exists, do not create if one exists - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - for user in data['users']: - if user['username'] == user_email: - return [False, 'user ' + user_email + ' already exists'] - - # Create the user - user_json = {'username': user_email} - - if first_name is not None: - user_json['firstName'] = first_name - - if last_name is not None: - user_json['lastName'] = last_name - - if system_role is not None: - user_json['systemRole'] = system_role - - res = requests.post(self.url + '/api/users', headers=self.hdrs, data=json.dumps(user_json), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def delete_user(self, user_email): - '''**Description** - Deletes a user from Sysdig Monitor. 
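A sketch of the get_data call documented earlier in this file, using the metric/grouping-key format its docstring describes. The metric choice and time window are illustrative.

from sdcclient import SdMonitorClient

sdclient = SdMonitorClient(token='<your-api-token>')

# Average CPU per host over the last 10 minutes, one sample per minute.
metrics = [
    {'id': 'host.hostName'},
    {'id': 'cpu.used.percent',
     'aggregations': {'time': 'timeAvg', 'group': 'avg'}},
]
ok, data = sdclient.get_data(metrics, start_ts=-600, sampling_s=60)
print(data if ok else 'error: ' + data)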
- - **Arguments** - - **user_email**: the email address of the user that will be deleted from Sysdig Monitor - - **Example** - `examples/user_team_mgmt.py `_ - ''' - res = self.get_user_ids([user_email]) - if res[0] == False: - return res - userid = res[1][0] - res = requests.delete(self.url + '/api/users/' + str(userid), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, None] - - def get_user(self, user_email): - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - for u in res.json()['users']: - if u['username'] == user_email: - return [True, u] - return [False, 'User not found'] - - def get_users(self): - '''**Description** - Return a list containing details about all users in the Sysdig Monitor environment. The API token must have Admin rights for this to succeed. - - **Success Return Value** - A list user objects - ''' - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()['users']] - - def edit_user(self, user_email, firstName=None, lastName=None, systemRole=None): - res = self.get_user(user_email) - if res[0] == False: - return res - user = res[1] - reqbody = { - 'systemRole': systemRole if systemRole else user['systemRole'], - 'username': user_email, - 'enabled': user.get('enabled', False), - 'version': user['version'] - } - - if firstName == None: - reqbody['firstName'] = user['firstName'] if 'firstName' in list(user.keys()) else '' - else: - reqbody['firstName'] = firstName - - if lastName == None: - reqbody['lastName'] = user['lastName'] if 'lastName' in list(user.keys()) else '' - else: - reqbody['lastName'] = lastName - - res = requests.put(self.url + '/api/users/' + str(user['id']), headers=self.hdrs, data=json.dumps(reqbody), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, 'Successfully edited user'] - - def get_teams(self, team_filter=''): - '''**Description** - Return the set of teams that match the filter specified. The *team_filter* should be a substring of the names of the teams to be returned. - - **Arguments** - - **team_filter**: the team filter to match when returning the list of teams - - **Success Return Value** - The teams that match the filter. - ''' - res = requests.get(self.url + '/api/teams', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - ret = [t for t in res.json()['teams'] if team_filter in t['name']] - return [True, ret] - - def get_team(self, name): - '''**Description** - Return the team with the specified team name, if it is present. - - **Arguments** - - **name**: the name of the team to return - - **Success Return Value** - The requested team. 
- - **Example** - `examples/user_team_mgmt.py `_ - ''' - res = self.get_teams(name) - if res[0] == False: - return res - for t in res[1]: - if t['name'] == name: - return [True, t] - return [False, 'Could not find team'] - - def get_team_ids(self, teams): - res = requests.get(self.url + '/api/teams', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - u = [x for x in res.json()['teams'] if x['name'] in teams] - return [True, [x['id'] for x in u]] - - def _get_user_id_dict(self, users): - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - u = [x for x in res.json()['users'] if x['username'] in users] - return [True, dict((user['username'], user['id']) for user in u)] - - def _get_id_user_dict(self, user_ids): - res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - u = [x for x in res.json()['users'] if x['id'] in user_ids] - return [True, dict((user['id'], user['username']) for user in u)] - - def get_user_ids(self, users): - res = self._get_user_id_dict(users) - if res[0] == False: - return res - else: - return [True, list(res[1].values())] - - def create_team(self, name, memberships=None, filter='', description='', show='host', theme='#7BB0B2', - perm_capture=False, perm_custom_events=False, perm_aws_data=False): - ''' - **Description** - Creates a new team - - **Arguments** - - **name**: the name of the team to create. - - **memberships**: dictionary of (user-name, team-role) pairs that should describe new memberships of the team. - - **filter**: the scope that this team is able to access within Sysdig Monitor. - - **description**: describes the team that will be created. - - **show**: possible values are *host*, *container*. - - **theme**: the color theme that Sysdig Monitor will use when displaying the team. - - **perm_capture**: if True, this team will be allowed to take sysdig captures. - - **perm_custom_events**: if True, this team will be allowed to view all custom events from every user and agent. - - **perm_aws_data**: if True, this team will have access to all AWS metrics and tags, regardless of the team's scope. - - **Success Return Value** - The newly created team. - - **Example** - `examples/user_team_mgmt.py `_ - ''' - reqbody = { - 'name': name, - 'description': description, - 'theme': theme, - 'show': show, - 'canUseSysdigCapture': perm_capture, - 'canUseCustomEvents': perm_custom_events, - 'canUseAwsMetrics': perm_aws_data, - } - - # Map user-names to IDs - if memberships != None and len(memberships) != 0: - res = self._get_user_id_dict(list(memberships.keys())) - if res[0] == False: - return [False, 'Could not fetch IDs for user names'] - reqbody['userRoles'] = [ - { - 'userId': user_id, - 'role': memberships[user_name] - } - for (user_name, user_id) in res[1].items() - ] - else: - reqbody['users'] = [] - - if filter != '': - reqbody['filter'] = filter - - res = requests.post(self.url + '/api/teams', headers=self.hdrs, data=json.dumps(reqbody), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def edit_team(self, name, memberships=None, filter=None, description=None, show=None, theme=None, - perm_capture=None, perm_custom_events=None, perm_aws_data=None): - ''' - **Description** - Edits an existing team. All arguments are optional. 
Team settings for any arguments unspecified will remain at their current settings. - - **Arguments** - - **name**: the name of the team to edit. - - **memberships**: dictionary of (user-name, team-role) pairs that should describe new memberships of the team. - - **filter**: the scope that this team is able to access within Sysdig Monitor. - - **description**: describes the team that will be created. - - **show**: possible values are *host*, *container*. - - **theme**: the color theme that Sysdig Monitor will use when displaying the team. - - **perm_capture**: if True, this team will be allowed to take sysdig captures. - - **perm_custom_events**: if True, this team will be allowed to view all custom events from every user and agent. - - **perm_aws_data**: if True, this team will have access to all AWS metrics and tags, regardless of the team's scope. - - **Success Return Value** - The edited team. - - **Example** - `examples/user_team_mgmt.py `_ - ''' - res = self.get_team(name) - if res[0] == False: - return res - - t = res[1] - reqbody = { - 'name': name, - 'theme': theme if theme else t['theme'], - 'show': show if show else t['show'], - 'canUseSysdigCapture': perm_capture if perm_capture else t['canUseSysdigCapture'], - 'canUseCustomEvents': perm_custom_events if perm_custom_events else t['canUseCustomEvents'], - 'canUseAwsMetrics': perm_aws_data if perm_aws_data else t['canUseAwsMetrics'], - 'id': t['id'], - 'version': t['version'] - } - - # Handling team description - if description is not None: - reqbody['description'] = description - elif 'description' in list(t.keys()): - reqbody['description'] = t['description'] - - # Handling for users to map (user-name, team-role) pairs to memberships - if memberships != None: - res = self._get_user_id_dict(list(memberships.keys())) - if res[0] == False: - return [False, 'Could not convert user names to IDs'] - reqbody['userRoles'] = [ - { - 'userId': user_id, - 'role': memberships[user_name] - } - for (user_name, user_id) in res[1].items() - ] - elif 'userRoles' in list(t.keys()): - reqbody['userRoles'] = t['userRoles'] - else: - reqbody['userRoles'] = [] - - # Special handling for filters since we don't support blank filters - if filter != None: - reqbody['filter'] = filter - elif 'filter' in list(t.keys()): - reqbody['filter'] = t['filter'] - - res = requests.put(self.url + '/api/teams/' + str(t['id']), headers=self.hdrs, data=json.dumps(reqbody), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def delete_team(self, name): - '''**Description** - Deletes a team from Sysdig Monitor. - - **Arguments** - - **name**: the name of the team that will be deleted from Sysdig Monitor - - **Example** - `examples/user_team_mgmt.py `_ - ''' - res = self.get_team(name) - if res[0] == False: - return res - - t = res[1] - res = requests.delete(self.url + '/api/teams/' + str(t['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, None] - - def list_memberships(self, team): - ''' - **Description** - List all memberships for specified team. - - **Arguments** - - **team**: the name of the team for which we want to see memberships - - **Result** - Dictionary of (user-name, team-role) pairs that should describe memberships of the team. 
- - **Example** - `examples/user_team_mgmt_extended.py `_ - ''' - res = self.get_team(team) - if res[0] == False: - return res - - raw_memberships = res[1]['userRoles'] - user_ids = [m['userId'] for m in raw_memberships] - - res = self._get_id_user_dict(user_ids) - if res[0] == False: - return [False, 'Could not fetch IDs for user names'] - else: - id_user_dict = res[1] - - return [True, dict([(id_user_dict[m['userId']], m['role']) for m in raw_memberships])] - - def save_memberships(self, team, memberships): - ''' - **Description** - Create new user team memberships or update existing ones. - - **Arguments** - - **team**: the name of the team for which we are creating new memberships - - **memberships**: dictionary of (user-name, team-role) pairs that should describe new memberships - - **Example** - `examples/user_team_mgmt_extended.py `_ - ''' - - res = self.list_memberships(team) - - if res[0] is False: - return res - - full_memberships = res[1] - full_memberships.update(memberships) - - res = self.edit_team(team, full_memberships) - - if res[0] is False: - return res - else: - return [True, None] - - def remove_memberships(self, team, users): - ''' - **Description** - Remove user memberships from specified team. - - **Arguments** - - **team**: the name of the team from which user memberships are removed - - **users**: list of usernames which should be removed from team - - **Example** - `examples/user_team_mgmt_extended.py `_ - ''' - - res = self.list_memberships(team) - - if res[0] is False: - return res - - old_memberships = res[1] - new_memberships = {k: v for k, v in old_memberships.items() if k not in users} - - res = self.edit_team(team, new_memberships) - - if res[0] is False: - return res - else: - return [True, None] - - def get_agents_config(self): - res = requests.get(self.url + '/api/agents/config', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data] - - def set_agents_config(self, config): - res = requests.put(self.url + '/api/agents/config', headers=self.hdrs, data=json.dumps(config), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def clear_agents_config(self): - data = {'files' : []} - self.set_agents_config(data) - - def get_user_api_token(self, username, teamname): - res = self.get_team(teamname) - if res[0] == False: - return res - - t = res[1] - - res = requests.get(self.url + '/api/token/%s/%d' % (username, t['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data['token']['key']] - -class SdMonitorClient(_SdcCommon): - - def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True): - super(SdMonitorClient, self).__init__(token, sdc_url, ssl_verify) - - def get_alerts(self): - '''**Description** - Retrieve the list of alerts configured by the user. - - **Success Return Value** - An array of alert dictionaries, with the format described at `this link `__ - - **Example** - `examples/list_alerts.py `_ - ''' - res = requests.get(self.url + '/api/alerts', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def get_notifications(self, from_ts, to_ts, state=None, resolved=None): - '''**Description** - Returns the list of Sysdig Monitor alert notifications. 
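An illustrative sketch of the team and membership helpers above. The team name, scope filter, user names, and role strings are placeholders.

from sdcclient import SdMonitorClient

sdclient = SdMonitorClient(token='<your-api-token>')

# Create a scoped team, then adjust its memberships with the helpers above.
ok, team = sdclient.create_team(
    'Operations', filter="kubernetes.namespace.name = 'production'",
    description='Ops team', perm_capture=True)
if not ok:
    raise SystemExit(team)

sdclient.save_memberships('Operations',
                          {'alice@example.com': 'ROLE_TEAM_EDIT'})
sdclient.remove_memberships('Operations', ['alice@example.com'])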
- - **Arguments** - - **from_ts**: filter events by start time. Timestamp format is in UTC (seconds). - - **to_ts**: filter events by start time. Timestamp format is in UTC (seconds). - - **state**: filter events by alert state. Supported values are ``OK`` and ``ACTIVE``. - - **resolved**: filter events by resolution status. Supported values are ``True`` and ``False``. - - **Success Return Value** - A dictionary containing the list of notifications. - - **Example** - `examples/list_alert_notifications.py `_ - ''' - params = {} - - if from_ts is not None: - params['from'] = from_ts * 1000000 - - if to_ts is not None: - params['to'] = to_ts * 1000000 - - if state is not None: - params['state'] = state - - if resolved is not None: - params['resolved'] = resolved - - res = requests.get(self.url + '/api/notifications', headers=self.hdrs, params=params, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def update_notification_resolution(self, notification, resolved): - '''**Description** - Updates the resolution status of an alert notification. - - **Arguments** - - **notification**: notification object as returned by :func:`~SdcClient.get_notifications`. - - **resolved**: new resolution status. Supported values are ``True`` and ``False``. - - **Success Return Value** - The updated notification. - - **Example** - `examples/resolve_alert_notifications.py `_ - ''' - if 'id' not in notification: - return [False, 'Invalid notification format'] - - notification['resolved'] = resolved - data = {'notification': notification} - - res = requests.put(self.url + '/api/notifications/' + str(notification['id']), headers=self.hdrs, data=json.dumps(data), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def create_alert(self, name=None, description=None, severity=None, for_atleast_s=None, condition=None, - segmentby=[], segment_condition='ANY', user_filter='', notify=None, enabled=True, - annotations={}, alert_obj=None): - '''**Description** - Create a threshold-based alert. - - **Arguments** - - **name**: the alert name. This will appear in the Sysdig Monitor UI and in notification emails. - - **description**: the alert description. This will appear in the Sysdig Monitor UI and in notification emails. - - **severity**: syslog-encoded alert severity. This is a number from 0 to 7 where 0 means 'emergency' and 7 is 'debug'. - - **for_atleast_s**: the number of consecutive seconds the condition must be satisfied for the alert to fire. - - **condition**: the alert condition, as described here https://app.sysdigcloud.com/apidocs/#!/Alerts/post_api_alerts - - **segmentby**: a list of Sysdig Monitor segmentation criteria that can be used to apply the alert to multiple entities. For example, segmenting a CPU alert by ['host.mac', 'proc.name'] allows to apply it to any process in any machine. - - **segment_condition**: When *segmentby* is specified (and therefore the alert will cover multiple entities) this field is used to determine when it will fire. In particular, you have two options for *segment_condition*: **ANY** (the alert will fire when at least one of the monitored entities satisfies the condition) and **ALL** (the alert will fire when all of the monitored entities satisfy the condition). - - **user_filter**: a boolean expression combining Sysdig Monitor segmentation criteria that makes it possible to reduce the scope of the alert. 
For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **notify**: the type of notification you want this alert to generate. Options are *EMAIL*, *SNS*, *PAGER_DUTY*, *SYSDIG_DUMP*. - - **enabled**: if True, the alert will be enabled when created. - - **annotations**: an optional dictionary of custom properties that you can associate to this alert for automation or management reasons - - **alert_obj**: an optional fully-formed Alert object of the format returned in an "alerts" list by :func:`~SdcClient.get_alerts` This is an alternative to creating the Alert using the individual parameters listed above. - - **Success Return Value** - A dictionary describing the just created alert, with the format described at `this link `__ - - **Example** - `examples/create_alert.py `_ - ''' - # - # Get the list of alerts from the server - # - res = requests.get(self.url + '/api/alerts', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - j = res.json() - - if alert_obj is None: - if None in (name, description, severity, for_atleast_s, condition): - return [False, 'Must specify a full Alert object or all parameters: name, description, severity, for_atleast_s, condition'] - else: - # - # Populate the alert information - # - alert_json = { - 'alert' : { - 'type' : 'MANUAL', - 'name' : name, - 'description' : description, - 'enabled' : enabled, - 'severity' : severity, - 'timespan' : for_atleast_s * 1000000, - 'condition' : condition, - 'filter': user_filter - } - } - - if segmentby != None and segmentby != []: - alert_json['alert']['segmentBy'] = segmentby - alert_json['alert']['segmentCondition'] = {'type' : segment_condition} - - if annotations != None and annotations != {}: - alert_json['alert']['annotations'] = annotations - - if notify != None: - alert_json['alert']['notificationChannelIds'] = notify - else: - # The REST API enforces "Alert ID and version must be null", so remove them if present, - # since these would have been there in a dump from the list_alerts.py example. - alert_obj.pop('id', None) - alert_obj.pop('version', None) - alert_json = { - 'alert' : alert_obj - } - - # - # Create the new alert - # - res = requests.post(self.url + '/api/alerts', headers=self.hdrs, data=json.dumps(alert_json), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def update_alert(self, alert): - '''**Description** - Update a modified threshold-based alert. - - **Arguments** - - **alert**: one modified alert object of the same format as those in the list returned by :func:`~SdcClient.get_alerts`. - - **Success Return Value** - The updated alert. - - **Example** - `examples/update_alert.py `_ - ''' - if 'id' not in alert: - return [False, "Invalid alert format"] - - res = requests.put(self.url + '/api/alerts/' + str(alert['id']), headers=self.hdrs, data=json.dumps({ "alert": alert}), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def delete_alert(self, alert): - '''**Description** - Deletes an alert. - - **Arguments** - - **alert**: the alert dictionary as returned by :func:`~SdcClient.get_alerts`. - - **Success Return Value** - ``None``. 
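A sketch of the create_alert call documented above, paired with get_notification_ids. The alert name, threshold, and channel name are illustrative.

from sdcclient import SdMonitorClient

sdclient = SdMonitorClient(token='<your-api-token>')

# Fire when any single host stays above 80% CPU for 10 minutes,
# notifying an existing email channel.
ok, notify = sdclient.get_notification_ids(
    [{'type': 'EMAIL', 'name': 'Ops mailing list'}])
if not ok:
    raise SystemExit(notify)

ok, alert = sdclient.create_alert(
    name='High CPU',
    description='CPU above 80% for 10 minutes',
    severity=6,
    for_atleast_s=600,
    condition='avg(cpu.used.percent) > 80',
    segmentby=['host.hostName'],
    segment_condition='ANY',
    notify=notify)
print(alert if ok else 'error: ' + alert)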
- - **Example** - `examples/delete_alert.py `_ - ''' - if 'id' not in alert: - return [False, 'Invalid alert format'] - - res = requests.delete(self.url + '/api/alerts/' + str(alert['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, None] - - def get_explore_grouping_hierarchy(self): - '''**Description** - Return the user's current grouping hierarchy as visible in the Explore tab of Sysdig Monitor. - - **Success Return Value** - A list containing the list of the user's Explore grouping criteria. - - **Example** - `examples/print_explore_grouping.py `_ - ''' - res = requests.get(self.url + '/api/groupConfigurations', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - data = res.json() - - if 'groupConfigurations' not in data: - return [False, 'corrupted groupConfigurations API response'] - - gconfs = data['groupConfigurations'] - - for gconf in gconfs: - if gconf['id'] == 'explore': - res = [] - items = gconf['groups'][0]['groupBy'] - - for item in items: - res.append(item['metric']) - - return [True, res] - - return [False, 'corrupted groupConfigurations API response, missing "explore" entry'] - - def set_explore_grouping_hierarchy(self, new_hierarchy): - '''**Description** - Changes the grouping hierarchy in the Explore panel of the current user. - - **Arguments** - - **new_hierarchy**: a list of sysdig segmentation metrics indicating the new grouping hierarchy. - ''' - body = { - 'id': 'explore', - 'groups': [{'groupBy':[]}] - } - - for item in new_hierarchy: - body['groups'][0]['groupBy'].append({'metric': item}) - - res = requests.put(self.url + '/api/groupConfigurations/explore', headers=self.hdrs, - data=json.dumps(body), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - else: - return [True, None] - - def get_views_list(self): - res = requests.get(self.url + '/api/defaultDashboards', headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def get_view(self, name): - gvres = self.get_views_list() - if gvres[0] is False: - return gvres - - vlist = gvres[1]['defaultDashboards'] - - id = None - - for v in vlist: - if v['name'] == name: - id = v['id'] - break - - if not id: - return [False, 'view ' + name + ' not found'] - - res = requests.get(self.url + '/api/defaultDashboards/' + id, headers=self.hdrs, - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def get_dashboards(self): - '''**Description** - Return the list of dashboards available under the given user account. This includes the dashboards created by the user and the ones shared with her by other users. - - **Success Return Value** - A dictionary containing the list of available sampling intervals. - - **Example** - `examples/list_dashboards.py `_ - ''' - res = requests.get(self.url + '/ui/dashboards', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def find_dashboard_by(self, name=None): - '''**Description** - Finds dashboards with the specified name. 
You can then delete the dashboard (with :func:`~SdcClient.delete_dashboard`) or edit panels (with :func:`~SdcClient.add_dashboard_panel` and :func:`~SdcClient.remove_dashboard_panel`) - - **Arguments** - - **name**: the name of the dashboards to find. - - **Success Return Value** - A list of dictionaries of dashboards matching the specified name. - - **Example** - `examples/dashboard.py `_ - ''' - res = self.get_dashboards() - if res[0] is False: - return res - else: - def filter_fn(configuration): - return configuration['name'] == name - def create_item(configuration): - return {'dashboard': configuration} - - dashboards = list(map(create_item, list(filter(filter_fn, res[1]['dashboards'])))) - return [True, dashboards] - - def create_dashboard_with_configuration(self, configuration): - res = requests.post(self.url + '/ui/dashboards', headers=self.hdrs, data=json.dumps({'dashboard': configuration}), - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - else: - return [True, res.json()] - - def create_dashboard(self, name): - ''' - **Description** - Creates an empty dashboard. You can then add panels by using ``add_dashboard_panel``. - - **Arguments** - - **name**: the name of the dashboard that will be created. - - **Success Return Value** - A dictionary showing the details of the new dashboard. - - **Example** - `examples/dashboard.py `_ - ''' - dashboard_configuration = { - 'name': name, - 'schema': 1, - 'items': [] - } - - # - # Create the new dashboard - # - res = requests.post(self.url + '/ui/dashboards', headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - else: - return [True, res.json()] - - def add_dashboard_panel(self, dashboard, name, panel_type, metrics, scope=None, sort_by=None, limit=None, layout=None): - """**Description** - Adds a panel to the dashboard. A panel can be a time series, or a top chart (i.e. bar chart), or a number panel. - - **Arguments** - - **dashboard**: dashboard to edit - - **name**: name of the new panel - - **panel_type**: type of the new panel. Valid values are: ``timeSeries``, ``top``, ``number`` - - **metrics**: a list of dictionaries, specifying the metrics to show in the panel, and optionally, if there is only one metric, a grouping key to segment that metric by. A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and groups of containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. Refer to the examples section below for ready to use code snippets. Note, certain panels allow certain combinations of metrics and grouping keys: - - ``timeSeries``: 1 or more metrics OR 1 metric + 1 grouping key - - ``top``: 1 or more metrics OR 1 metric + 1 grouping key - - ``number``: 1 metric only - - **scope**: filter to apply to the panel; must be based on metadata available in Sysdig Monitor; Example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **sort_by**: Data sorting; The parameter is optional and it's a dictionary of ``metric`` and ``mode`` (it can be ``desc`` or ``asc``) - - **limit**: This parameter sets the limit on the number of lines/bars shown in a ``timeSeries`` or ``top`` panel. 
In the case of more entities being available than the limit, the top entities according to the sort will be shown. The default value is 10 for ``top`` panels (for ``timeSeries`` the default is defined by Sysdig Monitor itself). Note that increasing the limit above 10 is not officially supported and may cause performance and rendering issues - - **layout**: Size and position of the panel. The dashboard layout is defined by a grid of 12 columns, each row height is equal to the column height. For example, say you want to show 2 panels at the top: one panel might be 6 x 3 (half the width, 3 rows height) located in row 1 and column 1 (top-left corner of the viewport), the second panel might be 6 x 3 located in row 1 and position 7. The location is specified by a dictionary of ``row`` (row position), ``col`` (column position), ``size_x`` (width), ``size_y`` (height). - - **Success Return Value** - A dictionary showing the details of the edited dashboard. - - **Example** - `examples/dashboard.py `_ - """ - panel_configuration = { - 'name': name, - 'showAs': None, - 'showAsType': None, - 'metrics': [], - 'gridConfiguration': { - 'col': 1, - 'row': 1, - 'size_x': 12, - 'size_y': 6 - } - } - - if panel_type == 'timeSeries': - # - # In case of a time series, the current dashboard implementation - # requires the timestamp to be explicitly specified as "key". - # However, this function uses the same abstraction of the data API - # that doesn't require to specify a timestamp key (you only need to - # specify time window and sampling) - # - metrics = copy.copy(metrics) - metrics.insert(0, {'id': 'timestamp'}) - - # - # Convert list of metrics to format used by Sysdig Monitor - # - property_names = {} - k_count = 0 - v_count = 0 - for i, metric in enumerate(metrics): - property_name = 'v' if 'aggregations' in metric else 'k' - - if property_name == 'k': - i = k_count - k_count += 1 - else: - i = v_count - v_count += 1 - property_names[metric['id']] = property_name + str(i) - - panel_configuration['metrics'].append({ - 'metricId': metric['id'], - 'aggregation': metric['aggregations']['time'] if 'aggregations' in metric else None, - 'groupAggregation': metric['aggregations']['group'] if 'aggregations' in metric else None, - 'propertyName': property_name + str(i) - }) - # - # Convert scope to format used by Sysdig Monitor - # - if scope != None: - filter_expressions = scope.strip(' \t\n\r?!.').split(" and ") - filters = [] - - for filter_expression in filter_expressions: - values = filter_expression.strip(' \t\n\r?!.').split("=") - if len(values) != 2: - return [False, "invalid scope format"] - filters.append({ - 'metric': values[0].strip(' \t\n\r?!.'), - 'op': '=', - 'value': values[1].strip(' \t\n\r"?!.'), - 'filters': None - }) - - if len(filters) > 0: - panel_configuration['filter'] = { - 'filters': { - 'logic': 'and', - 'filters': filters - } - } - - # - # Configure panel type - # - if panel_type == 'timeSeries': - panel_configuration['showAs'] = 'timeSeries' - panel_configuration['showAsType'] = 'line' - - if limit != None: - panel_configuration['paging'] = { - 'from': 0, - 'to': limit - 1 - } - - elif panel_type == 'number': - panel_configuration['showAs'] = 'summary' - panel_configuration['showAsType'] = 'summary' - elif panel_type == 'top': - panel_configuration['showAs'] = 'top' - panel_configuration['showAsType'] = 'bars' - - if sort_by is None: - panel_configuration['sorting'] = [{ - 'id': 'v0', - 'mode': 'desc' - }] - else: - panel_configuration['sorting'] = [{ - 'id': 
property_names[sort_by['metric']], - 'mode': sort_by['mode'] - }] - - if limit is None: - panel_configuration['paging'] = { - 'from': 0, - 'to': 10 - } - else: - panel_configuration['paging'] = { - 'from': 0, - 'to': limit - 1 - } - - - # - # Configure layout - # - if layout != None: - panel_configuration['gridConfiguration'] = layout - - # - # Clone existing dashboard... - # - dashboard_configuration = copy.deepcopy(dashboard) - dashboard_configuration['id'] = None - - # - # ... and add the new panel - # - dashboard_configuration['items'].append(panel_configuration) - - # - # Update dashboard - # - res = requests.put(self.url + '/ui/dashboards/' + str(dashboard['id']), headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - else: - return [True, res.json()] - - def remove_dashboard_panel(self, dashboard, panel_name): - '''**Description** - Removes a panel from the dashboard. The panel to remove is identified by the specified ``name``. - - **Arguments** - - **name**: name of the panel to find and remove - - **Success Return Value** - A dictionary showing the details of the edited dashboard. - - **Example** - `examples/dashboard.py `_ - ''' - # - # Clone existing dashboard... - # - dashboard_configuration = copy.deepcopy(dashboard) - dashboard_configuration['id'] = None - - # - # ... find the panel - # - def filter_fn(panel): - return panel['name'] == panel_name - panels = list(filter(filter_fn, dashboard_configuration['items'])) - - if len(panels) > 0: - # - # ... and remove it - # - for panel in panels: - dashboard_configuration['items'].remove(panel) - - # - # Update dashboard - # - res = requests.put(self.url + '/ui/dashboards/' + str(dashboard['id']), headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}), - verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - else: - return [True, res.json()] - else: - return [False, 'Not found'] - - def create_dashboard_from_template(self, dashboard_name, template, scope, shared=False, public=False, annotations={}): - if scope is not None: - if isinstance(scope, basestring) == False: - return [False, 'Invalid scope format: Expected a string'] - - # - # Clean up the dashboard we retireved so it's ready to be pushed - # - template['id'] = None - template['version'] = None - template['schema'] = 1 - template['name'] = dashboard_name - template['isShared'] = shared - template['isPublic'] = public - template['publicToken'] = None - - # - # set dashboard scope to the specific parameter - # NOTE: Individual panels might override the dashboard scope, the override will NOT be reset - # - template['filterExpression'] = scope - - if 'items' in template: - for chart in template['items']: - if 'overrideFilter' in chart and chart['overrideFilter'] == False: - # patch frontend bug to hide scope override warning even when it's not really overridden - chart['scope'] = scope - - if 'annotations' in template: - template['annotations'].update(annotations) - else: - template['annotations'] = annotations - - template['annotations']['createdByEngine'] = True - - # - # Create the new dashboard - # - res = requests.post(self.url + '/ui/dashboards', headers=self.hdrs, data=json.dumps({'dashboard': template}), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - else: - return [True, res.json()] - - def create_dashboard_from_view(self, newdashname, viewname, filter, 
shared=False, public=False, annotations={}): - '''**Description** - Create a new dasboard using one of the Sysdig Monitor views as a template. You will be able to define the scope of the new dashboard. - - **Arguments** - - **newdashname**: the name of the dashboard that will be created. - - **viewname**: the name of the view to use as the template for the new dashboard. This corresponds to the name that the view has in the Explore page. - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **shared**: if set to True, the new dashboard will be a shared one. - - **public**: if set to True, the new dashboard will be shared with public token. - - **annotations**: an optional dictionary of custom properties that you can associate to this dashboard for automation or management reasons - - **Success Return Value** - A dictionary showing the details of the new dashboard. - - **Example** - `examples/create_dashboard.py `_ - ''' - # - # Find our template view - # - gvres = self.get_view(viewname) - if gvres[0] is False: - return gvres - - view = gvres[1]['defaultDashboard'] - - view['timeMode'] = {'mode' : 1} - view['time'] = {'last' : 2 * 60 * 60 * 1000000, 'sampling' : 2 * 60 * 60 * 1000000} - - # - # Create the new dashboard - # - return self.create_dashboard_from_template(newdashname, view, filter, shared, public, annotations) - - def create_dashboard_from_dashboard(self, newdashname, templatename, filter, shared=False, public=False, annotations={}): - '''**Description** - Create a new dasboard using one of the existing dashboards as a template. You will be able to define the scope of the new dasboard. - - **Arguments** - - **newdashname**: the name of the dashboard that will be created. - - **viewname**: the name of the dasboard to use as the template, as it appears in the Sysdig Monitor dashboard page. - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **shared**: if set to True, the new dashboard will be a shared one. - - **public**: if set to True, the new dashboard will be shared with public token. - - **annotations**: an optional dictionary of custom properties that you can associate to this dashboard for automation or management reasons - - **Success Return Value** - A dictionary showing the details of the new dashboard. - - **Example** - `examples/create_dashboard.py `_ - ''' - # - # Get the list of dashboards from the server - # - res = requests.get(self.url + '/ui/dashboards', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - j = res.json() - - # - # Find our template dashboard - # - dboard = None - - for db in j['dashboards']: - if db['name'] == templatename: - dboard = db - break - - if dboard is None: - self.lasterr = 'can\'t find dashboard ' + templatename + ' to use as a template' - return [False, self.lasterr] - - # - # Create the dashboard - # - return self.create_dashboard_from_template(newdashname, dboard, filter, shared, public, annotations) - - def create_dashboard_from_file(self, newdashname, filename, filter, shared=False, public=False, annotations={}): - ''' - **Description** - Create a new dasboard using a dashboard template saved to disk. 
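For illustration, a sketch of the dashboard-creation helpers above. The dashboard name, view name, and scope are placeholders.

from sdcclient import SdMonitorClient

sdclient = SdMonitorClient(token='<your-api-token>')

# Instantiate a view as a dashboard, scoped to a single namespace.
ok, dash = sdclient.create_dashboard_from_view(
    'Production overview', 'Overview by Host',
    "kubernetes.namespace.name = 'production'")
print(dash if ok else 'error: ' + dash)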
- - **Arguments** - - **newdashname**: the name of the dashboard that will be created. - - **filename**: name of a file containing a JSON object for a dashboard in the format of an array element returned by :func:`~SdcClient.get_dashboards` - - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. - - **shared**: if set to True, the new dashboard will be a shared one. - - **public**: if set to True, the new dashboard will be shared with public token. - - **annotations**: an optional dictionary of custom properties that you can associate to this dashboard for automation or management reasons - - **Success Return Value** - A dictionary showing the details of the new dashboard. - - **Example** - `examples/dashboard_save_load.py `_ - ''' - # - # Load the Dashboard - # - with open(filename) as data_file: - dboard = json.load(data_file) - - dboard['timeMode'] = {'mode' : 1} - dboard['time'] = {'last' : 2 * 60 * 60 * 1000000, 'sampling' : 2 * 60 * 60 * 1000000} - - # - # Create the new dashboard - # - return self.create_dashboard_from_template(newdashname, dboard, filter, shared, public, annotations) - - def delete_dashboard(self, dashboard): - '''**Description** - Deletes a dashboard. - - **Arguments** - - **dashboard**: the dashboard object as returned by :func:`~SdcClient.get_dashboards`. - - **Success Return Value** - `None`. - - **Example** - `examples/delete_dashboard.py `_ - ''' - if 'id' not in dashboard: - return [False, "Invalid dashboard format"] - - res = requests.delete(self.url + '/ui/dashboards/' + str(dashboard['id']), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, None] - - def get_metrics(self): - '''**Description** - Return the metric list that can be used for data requests/alerts/dashboards. - - **Success Return Value** - A dictionary containing the list of available metrics. - - **Example** - `examples/list_metrics.py `_ - ''' - res = requests.get(self.url + '/api/data/metrics', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - -# For backwards compatibility -SdcClient = SdMonitorClient - -class SdSecureClient(_SdcCommon): - - def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True): - super(SdSecureClient, self).__init__(token, sdc_url, ssl_verify) - - self.customer_id = None - - def _get_falco_rules(self, kind): - res = requests.get(self.url + '/api/settings/falco/{}RulesFile'.format(kind), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - return [True, data] - - def get_system_falco_rules(self): - '''**Description** - Get the system falco rules file in use for this customer. See the `Falco wiki `_ for documentation on the falco rules format. - - **Arguments** - - None - - **Success Return Value** - The contents of the system falco rules file. - - **Example** - `examples/get_secure_system_falco_rules.py `_ - ''' - - return self._get_falco_rules("system") - - def get_user_falco_rules(self): - '''**Description** - Get the user falco rules file in use for this customer. See the `Falco wiki `_ for documentation on the falco rules format. - - **Arguments** - - None - - **Success Return Value** - The contents of the user falco rules file. 
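A minimal usage sketch for the dashboard helpers above; the token, view name, and scope are placeholders, and the top-level sdcclient import is assumed to keep exporting SdMonitorClient:

    from sdcclient import SdMonitorClient   # assumed package-level export

    sdclient = SdMonitorClient('YOUR-API-TOKEN')   # placeholder token

    # Create a dashboard from an Explore view, scoped to one namespace.
    # 'Service Overview' is a hypothetical view name as it appears in the Explore page.
    ok, res = sdclient.create_dashboard_from_view(
        'Production overview',
        'Service Overview',
        "kubernetes.namespace.name = 'production'")
    if not ok:
        print('dashboard creation failed: ' + res)

    # Metric names usable in panels and alerts can be listed with get_metrics()
    ok, metrics = sdclient.get_metrics()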
- - **Example** - `examples/get_secure_user_falco_rules.py `_ - ''' - return self._get_falco_rules("user") - - def _set_falco_rules(self, kind, rules_content): - payload = self._get_falco_rules(kind) - - if not payload[0]: - return payload - - payload[1]["{}RulesFile".format(kind)]["content"] = rules_content # pylint: disable=unsubscriptable-object - - res = requests.put(self.url + '/api/settings/falco/{}RulesFile'.format(kind), headers=self.hdrs, data=json.dumps(payload[1]), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def set_system_falco_rules(self, rules_content): - '''**Description** - Set the system falco rules file in use for this customer. NOTE: This API endpoint can *only* be used in on-premise deployments. Generally the system falco rules file is only modified in conjunction with Sysdig support. See the `Falco wiki `_ for documentation on the falco rules format. - - **Arguments** - - A string containing the system falco rules. - - **Success Return Value** - The contents of the system falco rules file that were just updated. - - **Example** - `examples/set_secure_system_falco_rules.py `_ - - ''' - return self._set_falco_rules("system", rules_content) - - def set_user_falco_rules(self, rules_content): - '''**Description** - Set the user falco rules file in use for this customer. See the `Falco wiki `_ for documentation on the falco rules format. - - **Arguments** - - A string containing the user falco rules. - - **Success Return Value** - The contents of the user falco rules file that were just updated. - - **Example** - `examples/set_secure_user_falco_rules.py `_ - - ''' - return self._set_falco_rules("user", rules_content) - - # Only one kind for now called "default", but might add a "custom" kind later. - def _get_falco_rules_files(self, kind): - - res = requests.get(self.url + '/api/settings/falco/{}RulesFiles'.format(kind), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - data = res.json() - - return [True, data] - - def get_default_falco_rules_files(self): - '''**Description** - Get the set of falco rules files from the backend. The _files programs and endpoints are a - replacement for the system_file endpoints and allow for publishing multiple files instead - of a single file as well as publishing multiple variants of a given file that are compatible - with different agent versions. - - **Arguments** - - None - - **Success Return Value** - A dict with the following keys: - - tag: A string used to uniquely identify this set of rules. It is recommended that this tag change every time the set of rules is updated. - - files: An array of dicts. 
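A short sketch of the user-rules round trip described above, assuming SdSecureClient stays importable from the package root; the appended rule is illustrative, and the userRulesFile/content keys mirror the payload layout used by _set_falco_rules():

    from sdcclient import SdSecureClient   # assumed package-level export

    sdclient = SdSecureClient('YOUR-API-TOKEN')   # placeholder token

    # Read the current user rules, append an illustrative list, and push them back.
    ok, data = sdclient.get_user_falco_rules()
    if ok:
        content = data['userRulesFile']['content']
        content += "\n- list: trusted_images\n  items: [nginx, haproxy]\n"
        ok, res = sdclient.set_user_falco_rules(content)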
Each dict has the following keys: - - name: the name of the file - - variants: An array of dicts with the following keys: - - requiredEngineVersion: the minimum falco engine version that can read this file - - content: the falco rules content - An example would be: - {'tag': 'v1.5.9', - 'files': [ - { - 'name': 'falco_rules.yaml', - 'variants': [ - { - 'content': '- required_engine_version: 29\n\n- list: foo\n', - 'requiredEngineVersion': 29 - }, - { - 'content': '- required_engine_version: 1\n\n- list: foo\n', - 'requiredEngineVersion': 1 - } - ] - }, - { - 'name': 'k8s_audit_rules.yaml', - 'variants': [ - { - 'content': '# some comment\n', - 'requiredEngineVersion': 0 - } - ] - } - ] - } - - **Example** - `examples/get_default_falco_rules_files.py `_ - ''' - - res = self._get_falco_rules_files("default") - - if not res[0]: - return res - else: - res_obj = res[1]["defaultFalcoRulesFiles"] - - # Copy only the tag and files over - ret = {} - - if "tag" in res_obj: - ret["tag"] = res_obj["tag"] - - if "files" in res_obj: - ret["files"] = res_obj["files"] - - return [True, ret] - - def save_default_falco_rules_files(self, fsobj, save_dir): - '''**Description** - Given a dict returned from get_default_falco_rules_files, save those files to a set of files below save_dir. - The first level below save_dir is a directory with the tag name. The second level is a directory per file. - The third level is a directory per variant. Finally the files are at the lowest level, in a file called "content". - For example, using the example dict in get_default_falco_rules_files(), the directory layout would look like: - save_dir/ - v1.5.9/ - falco_rules.yaml/ - 29/ - content: a file containing "- required_engine_version: 29\n\n- list: foo\n" - 1/ - content: a file containing "- required_engine_version: 1\n\n- list: foo\n" - k8s_audit_rules.yaml/ - 0/ - content: a file containing "# some comment" - **Arguments** - - fsobj: a python dict matching the structure returned by get_default_falco_rules_files() - - save_dir: a directory path under which to save the files. If the path already exists, it will be removed first. - - **Success Return Value** - - None - - **Example** - `examples/get_default_falco_rules_files.py `_ - ''' - if os.path.exists(save_dir): - try: - if os.path.isdir(save_dir): - shutil.rmtree(save_dir) - else: - os.unlink(save_dir) - except Exception as e: - return [False, "Could not remove existing save dir {}: {}".format(save_dir, str(e))] - - prefix = os.path.join(save_dir, fsobj["tag"]) - try: - os.makedirs(prefix) - except Exception as e: - return [False, "Could not create tag directory {}: {}".format(prefix, str(e))] - - if "files" in fsobj: - for fobj in fsobj["files"]: - fprefix = os.path.join(prefix, fobj["name"]) - try: - os.makedirs(fprefix) - except Exception as e: - return [False, "Could not create file directory {}: {}".format(fprefix, str(e))] - for variant in fobj["variants"]: - vprefix = os.path.join(fprefix, str(variant["requiredEngineVersion"])) - try: - os.makedirs(vprefix) - except Exception as e: - return [False, "Could not create variant directory {}: {}".format(vprefix, str(e))] - cpath = os.path.join(vprefix, "content") - try: - with open(cpath, "w") as cfile: - cfile.write(variant["content"]) - except Exception as e: - return [False, "Could not write content to {}: {}".format(cfile, str(e))] - - return [True, None] - - # Only One kind for now, but might add a "custom" kind later. 
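A sketch of fetching the default rules files and mirroring them to disk with the directory layout described above; the save directory is a placeholder:

    from sdcclient import SdSecureClient   # assumed package-level export

    sdclient = SdSecureClient('YOUR-API-TOKEN')

    ok, fsobj = sdclient.get_default_falco_rules_files()
    if ok:
        # Writes save_dir/<tag>/<file>/<engine version>/content, removing any existing tree first
        ok, err = sdclient.save_default_falco_rules_files(fsobj, '/tmp/falco-rules')
        if not ok:
            print(err)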
- def _set_falco_rules_files(self, kind, rules_files): - - payload = self._get_falco_rules_files(kind) - - if not payload[0]: - return payload - - obj = payload[1]["{}FalcoRulesFiles".format(kind)] # pylint: disable=unsubscriptable-object - - obj["tag"] = rules_files["tag"] - obj["files"] = rules_files["files"] - - res = requests.put(self.url + '/api/settings/falco/{}RulesFiles'.format(kind), headers=self.hdrs, data=json.dumps(payload[1]), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - return [True, res.json()] - - def set_default_falco_rules_files(self, rules_files): - '''**Description** - Update the set of falco rules files to the provided set of files. See the `Falco wiki `_ for documentation on the falco rules format. - The _files programs and endpoints are a replacement for the system_file endpoints and - allow for publishing multiple files instead of a single file as well as publishing - multiple variants of a given file that are compatible with different agent versions. - - **Arguments** - - rules_files: a dict with the same structure as returned by get_default_falco_rules_files. - - **Success Return Value** - The contents of the default falco rules files that were just updated. - - **Example** - `examples/set_default_falco_rules_files.py `_ - - ''' - - return self._set_falco_rules_files("default", rules_files) - - def load_default_falco_rules_files(self, save_dir): - '''**Description** - Given a file and directory layout as described in save_default_falco_rules_files(), load those files and - return a dict representing the contents. This dict is suitable for passing to set_default_falco_rules_files(). - - **Arguments** - - save_dir: a directory path from which to load the files. - - **Success Return Value** - - A dict matching the format described in get_default_falco_rules_files. 
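And the reverse direction: a sketch that loads a saved tree and publishes it back, assuming the directory was produced by save_default_falco_rules_files():

    from sdcclient import SdSecureClient   # assumed package-level export

    sdclient = SdSecureClient('YOUR-API-TOKEN')

    # Load the on-disk tree back into the dict format and publish it
    ok, fsobj = sdclient.load_default_falco_rules_files('/tmp/falco-rules')
    if ok:
        ok, res = sdclient.set_default_falco_rules_files(fsobj)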
- - **Example** - `examples/set_default_falco_rules_files.py `_ - ''' - - tags = os.listdir(save_dir) - if len(tags) != 1: - return [False, "Directory {} did not contain exactly 1 entry".format(save_dir)] - - tpath = os.path.join(save_dir, tags[0]) - - if not os.path.isdir(tpath): - return [False, "Tag path {} is not a directory".format(tpath)] - - ret = {"tag": os.path.basename(tpath), "files": []} - - for fdir in os.listdir(tpath): - fpath = os.path.join(tpath, fdir) - if not os.path.isdir(fpath): - return [False, "File path {} is not a directory".format(fpath)] - fobj = {"name": os.path.basename(fpath), "variants": []} - for vdir in os.listdir(fpath): - vpath = os.path.join(fpath, vdir) - if not os.path.isdir(vpath): - return [False, "Variant path {} is not a directory".format(vpath)] - cpath = os.path.join(vpath, "content") - try: - with open(cpath, 'r') as content_file: - try: - required_engine_version = int(os.path.basename(vpath)) - if vpath < 0: - return [False, "Variant directory {} must be a positive number".format(vpath)] - fobj["variants"].append({ - "requiredEngineVersion": required_engine_version, - "content": content_file.read() - }) - except ValueError: - return [False, "Variant directory {} must be a number".format(vpath)] - except Exception as e: - return [False, "Could not read content at {}: {}".format(cpath, str(e))] - - ret["files"].append(fobj) - - return [True, ret] - - def _get_policy_events_int(self, ctx): - policy_events_url = self.url + '/api/policyEvents?from={:d}&to={:d}&offset={}&limit={}'.format(int(ctx['from']), int(ctx['to']), ctx['offset'], ctx['limit']) - if 'sampling' in ctx: - policy_events_url += '&sampling={:d}'.format(int(ctx['sampling'])) - - res = requests.get(policy_events_url, headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - # Increment the offset by limit - ctx['offset'] += ctx['limit'] - - return [True, {"ctx": ctx, "data": res.json()}] - - def get_policy_events_range(self, from_sec, to_sec, sampling=None): - '''**Description** - Fetch all policy events that occurred in the time range [from_sec:to_sec]. This method is used in conjunction - with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events. - - **Arguments** - - from_sec: the start of the timerange for which to get events - - end_sec: the end of the timerange for which to get events - - sampling: sample all policy events using *sampling* interval. - - **Success Return Value** - An array containing: - - A context object that should be passed to later calls to get_more_policy_events. - - An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events` - for details on the contents of policy events. - - **Example** - `examples/get_secure_policy_events.py `_ - - ''' - ctx = {"from": int(from_sec) * 1000000, - "to": int(to_sec) * 1000000, - "offset": 0, - "limit": 1000} - - if sampling is not None: - ctx["sampling"] = sampling - - return self._get_policy_events_int(ctx) - - def get_policy_events_duration(self, duration_sec, sampling=None): - '''**Description** - Fetch all policy events that occurred in the last duration_sec seconds. This method is used in conjunction with - :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events. - - **Arguments** - - duration_sec: Fetch all policy events that have occurred in the last *duration_sec* seconds. 
- - sampling: Sample all policy events using *sampling* interval. - - **Success Return Value** - An array containing: - - A context object that should be passed to later calls to get_more_policy_events. - - An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events` - for details on the contents of policy events. - - **Example** - `examples/get_secure_policy_events.py `_ - - ''' - epoch = datetime.datetime.utcfromtimestamp(0) - - to_ts = (datetime.datetime.utcnow()-epoch).total_seconds() * 1000 * 1000 - from_ts = to_ts - (int(duration_sec) * 1000 * 1000) - ctx = {"to": to_ts, - "from": from_ts, - "offset": 0, - "limit": 1000} - - if sampling is not None: - ctx["sampling"] = sampling - - return self._get_policy_events_int(ctx) - - def get_more_policy_events(self, ctx): - '''**Description** - Fetch additional policy events after an initial call to :func:`~sdcclient.SdSecureClient.get_policy_events_range` / - :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to get_more_policy_events. - - **Arguments** - - ctx: a context object returned from an initial call to :func:`~sdcclient.SdSecureClient.get_policy_events_range` / - :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to get_more_policy_events. - - **Success Return Value** - An array containing: - - A context object that should be passed to later calls to get_more_policy_events() - - An array of policy events, in JSON format. Each policy event contains the following: - - hostMac: the mac address of the machine where the event occurred - - severity: a severity level from 1-7 - - timestamp: when the event occurred (ns since the epoch) - - version: a version number for this message (currently 1) - - policyId: a reference to the policy that generated this policy event - - output: A string describing the event that occurred - - id: a unique identifier for this policy event - - isAggregated: if true, this is a combination of multiple policy events - - containerId: the container in which the policy event occurred - - When the number of policy events returned is 0, there are no remaining events and you can stop calling get_more_policy_events(). - - **Example** - `examples/get_secure_policy_events.py `_ - ''' - return self._get_policy_events_int(ctx) - - def create_default_policies(self): - '''**Description** - Create a set of default policies using the current system falco rules file as a reference. For every falco rule in the system - falco rules file, one policy will be created. The policy will take the name and description from the name and description of - the corresponding falco rule. If a policy already exists with the same name, no policy is added or modified. Existing - policies will be unchanged. - - **Arguments** - - None - - **Success Return Value** - JSON containing details on any new policies that were added. - - **Example** - `examples/create_default_policies.py `_ - - ''' - res = requests.post(self.url + '/api/policies/createDefault', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def delete_all_policies(self): - '''**Description** - Delete all existing policies. The falco rules file is unchanged. 
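A paginated read of the last hour of policy events; the context returned by each call feeds the next one. The 'policyEvents' key of the returned payload is an assumption, since this diff does not show the response layout:

    from sdcclient import SdSecureClient   # assumed package-level export

    sdclient = SdSecureClient('YOUR-API-TOKEN')

    ok, res = sdclient.get_policy_events_duration(3600)
    while ok and res['data'].get('policyEvents'):       # 'policyEvents' key assumed
        for event in res['data']['policyEvents']:
            print(event['timestamp'], event['output'])
        ok, res = sdclient.get_more_policy_events(res['ctx'])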
- - **Arguments** - - None - - **Success Return Value** - The string "Policies Deleted" - - **Example** - `examples/delete_all_policies.py `_ - - ''' - res = requests.post(self.url + '/api/policies/deleteAll', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, "Policies Deleted"] - - def list_policies(self): - '''**Description** - List the current set of policies. - - **Arguments** - - None - - **Success Return Value** - A JSON object containing the number and details of each policy. - - **Example** - `examples/list_policies.py `_ - - ''' - res = requests.get(self.url + '/api/policies', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def get_policy_priorities(self): - '''**Description** - Get a list of policy ids in the order they will be evaluated. - - **Arguments** - - None - - **Success Return Value** - A JSON object representing the list of policy ids. - - **Example** - `examples/list_policies.py `_ - - ''' - - res = requests.get(self.url + '/api/policies/priorities', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def set_policy_priorities(self, priorities_json): - '''**Description** - Change the policy evaluation order - - **Arguments** - - priorities_json: a description of the new policy order. - - **Success Return Value** - A JSON object representing the updated list of policy ids. - - **Example** - `examples/set_policy_order.py `_ - - ''' - - try: - priorities_obj = json.loads(priorities_json) - except Exception as e: - return [False, "priorities json is not valid json: {}".format(str(e))] - - res = requests.put(self.url + '/api/policies/priorities', headers=self.hdrs, data=priorities_json, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - - def get_policy(self, name): - '''**Description** - Find the policy with name and return its json description. - - **Arguments** - - name: the name of the policy to fetch - - **Success Return Value** - A JSON object containing the description of the policy. If there is no policy with - the given name, returns False. - - **Example** - `examples/get_policy.py `_ - - ''' - res = requests.get(self.url + '/api/policies', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - policies = res.json()["policies"] - - # Find the policy with the given name and return it. - for policy in policies: - if policy["name"] == name: - return [True, policy] - - return [False, "No policy with name {}".format(name)] - - def add_policy(self, policy_json): - '''**Description** - Add a new policy using the provided json. - - **Arguments** - - policy_json: a description of the new policy - - **Success Return Value** - The string "OK" - - **Example** - `examples/add_policy.py `_ - - ''' - - try: - policy_obj = json.loads(policy_json) - except Exception as e: - return [False, "policy json is not valid json: {}".format(str(e))] - - body = {"policy": policy_obj} - res = requests.post(self.url + '/api/policies', headers=self.hdrs, data=json.dumps(body), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def update_policy(self, policy_json): - '''**Description** - Update an existing policy using the provided json. 
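A sketch of adding a policy; note that add_policy() expects a JSON string rather than a dict, and the field set below is illustrative rather than a complete policy schema:

    import json
    from sdcclient import SdSecureClient   # assumed package-level export

    sdclient = SdSecureClient('YOUR-API-TOKEN')

    policy = {                                   # illustrative subset of policy fields
        'name': 'Terminal shell in container',
        'description': 'Detect interactive shells spawned inside containers',
        'severity': 4,
        'enabled': True,
    }
    ok, res = sdclient.add_policy(json.dumps(policy))
    if not ok:
        print('add_policy failed: ' + res)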
The 'id' field from the policy is - used to determine which policy to update. - - **Arguments** - - policy_json: a description of the new policy - - **Success Return Value** - The string "OK" - - **Example** - `examples/update_policy.py `_ - - ''' - - try: - policy_obj = json.loads(policy_json) - except Exception as e: - return [False, "policy json is not valid json: {}".format(str(e))] - - if not "id" in policy_obj: - return [False, "Policy Json does not have an 'id' field"] - - body = {"policy": policy_obj} - - res = requests.put(self.url + '/api/policies/{}'.format(policy_obj["id"]), headers=self.hdrs, data=json.dumps(body), verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] - - def delete_policy_name(self, name): - '''**Description** - Delete the policy with the given name. - - **Arguments** - - name: the name of the policy to delete - - **Success Return Value** - The JSON object representing the now-deleted policy. - - **Example** - `examples/delete_policy.py `_ - - ''' - res = requests.get(self.url + '/api/policies', headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - # Find the policy with the given name and delete it - for policy in res.json()["policies"]: - if policy["name"] == name: - return self.delete_policy_id(policy["id"]) - - return [False, "No policy with name {}".format(name)] - - def delete_policy_id(self, id): - '''**Description** - Delete the policy with the given id - - **Arguments** - - id: the id of the policy to delete - - **Success Return Value** - The JSON object representing the now-deleted policy. - - **Example** - `examples/delete_policy.py `_ - - ''' - res = requests.delete(self.url + '/api/policies/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify) - if not self._checkResponse(res): - return [False, self.lasterr] - - return [True, res.json()] diff --git a/sdcclient/_common.py b/sdcclient/_common.py new file mode 100644 index 00000000..ed88b98c --- /dev/null +++ b/sdcclient/_common.py @@ -0,0 +1,999 @@ +import os +import json +import requests + + +class _SdcCommon(object): + '''Interact with the Sysdig Monitor/Secure API. + + **Arguments** + - **token**: A Sysdig Monitor/Secure API token from the *Sysdig Cloud API* section of the Settings page for `monitor `_ or .`secure `_. + - **sdc_url**: URL for contacting the Sysdig API server. Set this in `On-Premises installs `__. + - **ssl_verify**: Whether to verify certificate. Set to False if using a self-signed certificate in an `On-Premises install `__. + + **Returns** + An object for further interactions with the Sysdig Monitor/Secure API. See methods below. 
+ ''' + lasterr = None + + def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True): + self.token = os.environ.get("SDC_TOKEN", token) + self.hdrs = {'Authorization': 'Bearer ' + self.token, 'Content-Type': 'application/json'} + self.url = os.environ.get("SDC_URL", sdc_url) + self.ssl_verify = os.environ.get("SDC_SSL_VERIFY", None) + if self.ssl_verify == None: + self.ssl_verify = ssl_verify + else: + self.ssl_verify = self.ssl_verify.lower() == 'true' + + def _checkResponse(self, res): + if res.status_code >= 300: + errorcode = res.status_code + self.lasterr = None + + try: + j = res.json() + except Exception: + self.lasterr = 'status code ' + str(errorcode) + return False + + if 'errors' in j: + if 'message' in j['errors'][0]: + self.lasterr = j['errors'][0]['message'] + + if 'reason' in j['errors'][0]: + if self.lasterr is not None: + self.lasterr += ' ' + else: + self.lasrerr = '' + + self.lasterr += j['errors'][0]['reason'] + elif 'message' in j: + self.lasterr = j['message'] + else: + self.lasterr = 'status code ' + str(errorcode) + return False + return True + + def get_user_info(self): + '''**Description** + Get details about the current user. + + **Success Return Value** + A dictionary containing information about the user, for example its email and the maximum number of agents it can install. + + **Example** + `examples/print_user_info.py `_ + ''' + res = requests.get(self.url + '/api/user/me', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def get_user_token(self): + '''**Description** + Return the API token of the current user. + + **Success Return Value** + A string containing the user token. + ''' + res = requests.get(self.url + '/api/token', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + tkinfo = res.json() + + return [True, tkinfo['token']['key']] + + def get_connected_agents(self): + '''**Description** + Return the agents currently connected to Sysdig Monitor for the current user. + + **Success Return Value** + A list of the agents with all their attributes. + ''' + res = requests.get(self.url + '/api/agents/connected', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + data = res.json() + return [True, data['agents']] + + def get_n_connected_agents(self): + '''**Description** + Return the number of agents currently connected to Sysdig Monitor for the current user. + + **Success Return Value** + An integer number. + ''' + res = requests.get(self.url + '/api/agents/connected', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + data = res.json() + return [True, data['total']] + + def get_notification_ids(self, channels=None): + '''**Description** + Get an array of all configured Notification Channel IDs, or a filtered subset of them. + + **Arguments** + - **channels**: an optional array of dictionaries to limit the set of Notification Channel IDs returned. If not specified, IDs for all configured Notification Channels are returned. Each dictionary contains a ``type`` field that can be one of the available types of Notification Channel (``EMAIL``, ``SNS``, ``PAGER_DUTY``, ``SLACK``, ``OPSGENIE``, ``VICTOROPS``, ``WEBHOOK``) as well as additional elements specific to each channel type. + + **Success Return Value** + An array of Notification Channel IDs (integers). 
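Every method above follows the same [ok, payload] return convention, where a False first element means the second one carries lasterr; a minimal sketch, with the token taken from the SDC_TOKEN environment variable:

    from sdcclient import SdMonitorClient   # assumed package-level export

    sdclient = SdMonitorClient()              # token read from SDC_TOKEN if not passed explicitly

    ok, res = sdclient.get_n_connected_agents()
    if ok:
        print('connected agents: %d' % res)
    else:
        print('request failed: ' + res)       # on failure the payload is the lasterr string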
+ + **Examples** + - `examples/create_alert.py `_ + - `examples/restore_alerts.py `_ + ''' + + res = requests.get(self.url + '/api/notificationChannels', headers=self.hdrs, verify=self.ssl_verify) + + if not self._checkResponse(res): + return [False, self.lasterr] + + ids = [] + + # If no array of channel types/names was provided to filter by, + # just return them all. + if channels is None: + for ch in res.json()["notificationChannels"]: + ids.append(ch['id']) + return [True, ids] + + # Return the filtered set of channels based on the provided types/names array. + # Should try and improve this M * N lookup + for c in channels: + found = False + for ch in res.json()["notificationChannels"]: + if c['type'] == ch['type']: + if c['type'] == 'SNS': + opt = ch['options'] + if set(opt['snsTopicARNs']) == set(c['snsTopicARNs']): + found = True + ids.append(ch['id']) + elif c['type'] == 'EMAIL': + opt = ch['options'] + if 'emailRecipients' in c: + if set(c['emailRecipients']) == set(opt['emailRecipients']): + found = True + ids.append(ch['id']) + elif 'name' in c: + if c['name'] == ch.get('name'): + found = True + ids.append(ch['id']) + elif c['type'] == 'PAGER_DUTY': + opt = ch['options'] + if opt['account'] == c['account'] and opt['serviceName'] == c['serviceName']: + found = True + ids.append(ch['id']) + elif c['type'] == 'SLACK': + opt = ch['options'] + if 'channel' in opt and opt['channel'] == c['channel']: + found = True + ids.append(ch['id']) + elif c['type'] == 'OPSGENIE': + if 'name' in c: + if c['name'] == ch.get('name'): + found = True + ids.append(ch['id']) + elif c['type'] == 'VICTOROPS': + if 'name' in c: + if c['name'] == ch.get('name'): + found = True + ids.append(ch['id']) + elif c['type'] == 'WEBHOOK': + if 'name' in c: + if c['name'] == ch.get('name'): + found = True + ids.append(ch['id']) + if not found: + return [False, "Channel not found: " + str(c)] + + return [True, ids] + + def create_email_notification_channel(self, channel_name, email_recipients): + channel_json = { + 'notificationChannel': { + 'type': 'EMAIL', + 'name': channel_name, + 'enabled': True, + 'options': { + 'emailRecipients': email_recipients + } + } + } + + res = requests.post(self.url + '/api/notificationChannels', headers=self.hdrs, data=json.dumps(channel_json), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def get_notification_channel(self, id): + + res = requests.get(self.url + '/api/notificationChannels/' + str(id), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()['notificationChannel']] + + def update_notification_channel(self, channel): + if 'id' not in channel: + return [False, "Invalid channel format"] + + res = requests.put(self.url + '/api/notificationChannels/' + str(channel['id']), headers=self.hdrs, data=json.dumps({"notificationChannel": channel}), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def delete_notification_channel(self, channel): + if 'id' not in channel: + return [False, "Invalid channel format"] + + res = requests.delete(self.url + '/api/notificationChannels/' + str(channel['id']), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, None] + + def get_data_retention_info(self): + '''**Description** + Return the list of data retention intervals, with beginning and end 
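A sketch of creating an email channel and then resolving its ID with the channel-matching logic above; the channel name and recipient address are placeholders:

    from sdcclient import SdMonitorClient   # assumed package-level export

    sdclient = SdMonitorClient('YOUR-API-TOKEN')

    ok, res = sdclient.create_email_notification_channel('Ops email', ['oncall@example.com'])
    if ok:
        # EMAIL channels are matched on their recipient set
        ok, ids = sdclient.get_notification_ids(
            [{'type': 'EMAIL', 'emailRecipients': ['oncall@example.com']}])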
UTC time for each of them. Sysdig Monitor performs rollups of the data it stores. This means that data is stored at different time granularities depending on how far back in time it is. This call can be used to know what precision you can expect before you make a call to :func:`~SdcClient.get_data`. + + **Success Return Value** + A dictionary containing the list of available sampling intervals. + + **Example** + `examples/print_data_retention_info.py `_ + ''' + res = requests.get(self.url + '/api/history/timelines/', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def get_topology_map(self, grouping_hierarchy, time_window_s, sampling_time_s): + # + # Craft the time interval section + # + tlines = self.get_data_retention_info() + + for tline in tlines[1]['agents']: + if tline['sampling'] == sampling_time_s * 1000000: + timeinfo = tline + + if timeinfo is None: + return [False, "sampling time " + str(sampling_time_s) + " not supported"] + + timeinfo['from'] = timeinfo['to'] - timeinfo['sampling'] + + # + # Create the grouping hierarchy + # + gby = [{'metric': g} for g in grouping_hierarchy] + + # + # Prepare the json + # + req_json = { + 'format': { + 'type': 'map', + 'exportProcess': True + }, + 'time': timeinfo, + # 'filter': { + # 'filters': [ + # { + # 'metric': 'agent.tag.Tag', + # 'op': '=', + # 'value': 'production-maintenance', + # 'filters': None + # } + # ], + # 'logic': 'and' + # }, + 'limit': { + 'hostGroups': 20, + 'hosts': 20, + 'containers': 20, + 'processes': 10 + }, + 'group': { + 'configuration': { + 'groups': [ + { + 'filters': [], + 'groupBy': gby + } + ] + } + }, + 'nodeMetrics': [ + { + 'id': 'cpu.used.percent', + 'aggregation': 'timeAvg', + 'groupAggregation': 'avg' + } + ], + 'linkMetrics': [ + { + 'id': 'net.bytes.total', + 'aggregation': 'timeAvg', + 'groupAggregation': 'sum' + } + ] + } + + # + # Fire the request + # + res = requests.post(self.url + '/api/data?format=map', headers=self.hdrs, + data=json.dumps(req_json), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def post_event(self, name, description=None, severity=None, event_filter=None, tags=None): + '''**Description** + Send an event to Sysdig Monitor. The events you post are available in the Events tab in the Sysdig Monitor UI and can be overlied to charts. + + **Arguments** + - **name**: the name of the new event. + - **description**: a longer description offering detailed information about the event. + - **severity**: syslog style from 0 (high) to 7 (low). + - **event_filter**: metadata, in Sysdig Monitor format, of nodes to associate with the event, e.g. ``host.hostName = 'ip-10-1-1-1' and container.name = 'foo'``. + - **tags**: a list of key-value dictionaries that can be used to tag the event. Can be used for filtering/segmenting purposes in Sysdig Monitor. + + **Success Return Value** + A dictionary describing the new event. 
+ + **Examples** + - `examples/post_event_simple.py `_ + - `examples/post_event.py `_ + ''' + edata = { + 'event': { + 'name': name + } + } + + if description is not None: + edata['event']['description'] = description + + if severity is not None: + edata['event']['severity'] = severity + + if event_filter is not None: + edata['event']['filter'] = event_filter + + if tags is not None: + edata['event']['tags'] = tags + + res = requests.post(self.url + '/api/events/', headers=self.hdrs, data=json.dumps(edata), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def get_events(self, name=None, from_ts=None, to_ts=None, tags=None): + '''**Description** + Returns the list of Sysdig Monitor events. + + **Arguments** + - **name**: filter events by name. + - **from_ts**: filter events by start time. Timestamp format is in UTC (seconds). + - **to_ts**: filter events by end time. Timestamp format is in UTC (seconds). + - **tags**: filter events by tags. Can be, for example ``tag1 = 'value1'``. + + **Success Return Value** + A dictionary containing the list of events. + + **Example** + `examples/list_events.py `_ + ''' + params = {} + + if name is not None: + params['name'] = name + + if from_ts is not None: + params['from'] = from_ts + + if to_ts is not None: + params['to'] = to_ts + + if tags is not None: + params['tags'] = tags + + res = requests.get(self.url + '/api/events/', headers=self.hdrs, params=params, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def delete_event(self, event): + '''**Description** + Deletes an event. + + **Arguments** + - **event**: the event object as returned by :func:`~SdcClient.get_events`. + + **Success Return Value** + `None`. + + **Example** + `examples/delete_event.py `_ + ''' + if 'id' not in event: + return [False, "Invalid event format"] + + res = requests.delete(self.url + '/api/events/' + str(event['id']), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, None] + + def get_data(self, metrics, start_ts, end_ts=0, sampling_s=0, + filter='', datasource_type='host', paging=None): + '''**Description** + Export metric data (both time-series and table-based). + + **Arguments** + - **metrics**: a list of dictionaries, specifying the metrics and grouping keys that the query will return. A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. These entries are used to apply single or hierarchical segmentation to the returned data and don't require the aggregations section. Refer to the Example link below for ready-to-use code snippets. + - **start_ts**: the UTC time (in seconds) of the beginning of the data window. A negative value can be optionally used to indicate a relative time in the past from now. For example, -3600 means "one hour ago". + - **end_ts**: the UTC time (in seconds) of the end of the data window, or 0 to indicate "now". A negative value can also be optionally used to indicate a relative time in the past from now. For example, -3600 means "one hour ago". + - **sampling_s**: the duration of the samples that will be returned. 
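A sketch of posting and then listing a custom event; the event name, scope, and severity are placeholders:

    import time
    from sdcclient import SdMonitorClient   # assumed package-level export

    sdclient = SdMonitorClient('YOUR-API-TOKEN')

    ok, res = sdclient.post_event(
        'deployment',
        description='checkout v2.1 rolled out',
        severity=5,
        event_filter="kubernetes.namespace.name = 'production'")

    # List events with that name from the last hour
    now = int(time.time())
    ok, res = sdclient.get_events(name='deployment', from_ts=now - 3600, to_ts=now)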
0 means that the whole data will be returned as a single sample. + - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the query will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. + - **datasource_type**: specify the metric source for the request, can be ``container`` or ``host``. Most metrics, for example ``cpu.used.percent`` or ``memory.bytes.used``, are reported by both hosts and containers. By default, host metrics are used, but if the request contains a container-specific grouping key in the metric list/filter (e.g. ``container.name``), then the container source is used. In cases where grouping keys are missing or apply to both hosts and containers (e.g. ``tag.Name``), *datasource_type* can be explicitly set to avoid any ambiguity and allow the user to select precisely what kind of data should be used for the request. `examples/get_data_datasource.py `_ contains a few examples that should clarify the use of this argument. + - **paging**: if segmentation of the query generates values for several different entities (e.g. containers/hosts), this parameter specifies which to include in the returned result. It's specified as a dictionary of inclusive values for ``from`` and ``to`` with the default being ``{ "from": 0, "to": 9 }``, which will return values for the "top 10" entities. The meaning of "top" is query-dependent, based on points having been sorted via the specified group aggregation, with the results sorted in ascending order if the group aggregation is ``min`` or ``none``, and descending order otherwise. + + **Success Return Value** + A dictionary with the requested data. Data is organized in a list of time samples, each of which includes a UTC timestamp and a list of values, whose content and order reflect what was specified in the *metrics* argument. + + **Examples** + - `examples/get_data_simple.py `_ + - `examples/get_data_advanced.py `_ + - `examples/list_hosts.py `_ + - `examples/get_data_datasource.py `_ + ''' + reqbody = { + 'metrics': metrics, + 'dataSourceType': datasource_type, + } + + if start_ts < 0: + reqbody['last'] = -start_ts + elif start_ts == 0: + return [False, "start_ts cannot be 0"] + else: + reqbody['start'] = start_ts + reqbody['end'] = end_ts + + if filter != '': + reqbody['filter'] = filter + + if paging is not None: + reqbody['paging'] = paging + + if sampling_s != 0: + reqbody['sampling'] = sampling_s + + res = requests.post(self.url + '/api/data/', headers=self.hdrs, data=json.dumps(reqbody), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def get_sysdig_captures(self): + '''**Description** + Returns the list of sysdig captures for the user. + + **Success Return Value** + A dictionary containing the list of captures. + + **Example** + `examples/list_sysdig_captures.py `_ + ''' + res = requests.get(self.url + '/api/sysdig', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def poll_sysdig_capture(self, capture): + '''**Description** + Fetch the updated state of a sysdig capture. Can be used to poll the status of a capture that has been previously created and started with :func:`~SdcClient.create_sysdig_capture`. + + **Arguments** + - **capture**: the capture object as returned by :func:`~SdcClient.get_sysdig_captures` or :func:`~SdcClient.create_sysdig_capture`. 
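A sketch of a get_data() request segmenting CPU by process over the last 10 minutes; the aggregations layout (time/group keys) follows the library's data examples and is an assumption not spelled out in this diff:

    from sdcclient import SdMonitorClient   # assumed package-level export

    sdclient = SdMonitorClient('YOUR-API-TOKEN')

    metrics = [
        {'id': 'proc.name'},                                        # grouping key, no aggregation
        {'id': 'cpu.used.percent',
         'aggregations': {'time': 'timeAvg', 'group': 'avg'}},      # aggregation keys assumed
    ]
    ok, res = sdclient.get_data(metrics, -600, 0, 60,
                                filter="kubernetes.namespace.name = 'production'")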
+ + **Success Return Value** + A dictionary showing the updated details of the capture. Use the ``status`` field to check the progress of a capture. + + **Example** + `examples/create_sysdig_capture.py `_ + ''' + if 'id' not in capture: + return [False, 'Invalid capture format'] + + res = requests.get(self.url + '/api/sysdig/' + str(capture['id']), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def create_sysdig_capture(self, hostname, capture_name, duration, capture_filter='', folder='/'): + '''**Description** + Create a new sysdig capture. The capture will be immediately started. + + **Arguments** + - **hostname**: the hostname of the instrumented host where the capture will be taken. + - **capture_name**: the name of the capture. + - **duration**: the duration of the capture, in seconds. + - **capture_filter**: a sysdig filter expression. + - **folder**: directory in the S3 bucket where the capture will be saved. + + **Success Return Value** + A dictionary showing the details of the new capture. + + **Example** + `examples/create_sysdig_capture.py `_ + ''' + res = self.get_connected_agents() + if not res[0]: + return res + + capture_agent = None + + for agent in res[1]: + if hostname == agent['hostName']: + capture_agent = agent + break + + if capture_agent is None: + return [False, hostname + ' not found'] + + data = { + 'agent': capture_agent, + 'name': capture_name, + 'duration': duration, + 'folder': folder, + 'filters': capture_filter, + 'bucketName': '' + } + + res = requests.post(self.url + '/api/sysdig', headers=self.hdrs, data=json.dumps(data), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def create_user_invite(self, user_email, first_name=None, last_name=None, system_role=None): + '''**Description** + Invites a new user to use Sysdig Monitor. This should result in an email notification to the specified address. + + **Arguments** + - **user_email**: the email address of the user that will be invited to use Sysdig Monitor + - **first_name**: the first name of the user being invited + - **last_name**: the last name of the user being invited + - **system_role**: system-wide privilege level for this user regardless of team. specify 'ROLE_CUSTOMER' to create an Admin. if not specified, default is a non-Admin ('ROLE_USER'). + + **Success Return Value** + The newly created user. 
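A sketch of starting a 30-second capture on one instrumented host; the hostname is a placeholder that must match a connected agent's hostName, and poll_sysdig_capture() can then be used to follow the capture status:

    from sdcclient import SdMonitorClient   # assumed package-level export

    sdclient = SdMonitorClient('YOUR-API-TOKEN')

    ok, res = sdclient.create_sysdig_capture(
        'ip-10-1-1-1',                        # placeholder hostname of a connected agent
        'api-test-capture',
        30,
        capture_filter='proc.name=nginx',
        folder='/api-captures')
    if not ok:
        print('capture failed: ' + res)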
+ + **Examples** + - `examples/user_team_mgmt.py `_ + - `examples/user_team_mgmt_extended.py `_ + + ''' + # Look up the list of users to see if this exists, do not create if one exists + res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + data = res.json() + for user in data['users']: + if user['username'] == user_email: + return [False, 'user ' + user_email + ' already exists'] + + # Create the user + user_json = {'username': user_email} + + if first_name is not None: + user_json['firstName'] = first_name + + if last_name is not None: + user_json['lastName'] = last_name + + if system_role is not None: + user_json['systemRole'] = system_role + + res = requests.post(self.url + '/api/users', headers=self.hdrs, data=json.dumps(user_json), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def delete_user(self, user_email): + '''**Description** + Deletes a user from Sysdig Monitor. + + **Arguments** + - **user_email**: the email address of the user that will be deleted from Sysdig Monitor + + **Example** + `examples/user_team_mgmt.py `_ + ''' + res = self.get_user_ids([user_email]) + if res[0] == False: + return res + userid = res[1][0] + res = requests.delete(self.url + '/api/users/' + str(userid), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, None] + + def get_user(self, user_email): + res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + for u in res.json()['users']: + if u['username'] == user_email: + return [True, u] + return [False, 'User not found'] + + def get_users(self): + '''**Description** + Return a list containing details about all users in the Sysdig Monitor environment. The API token must have Admin rights for this to succeed. + + **Success Return Value** + A list user objects + ''' + res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()['users']] + + def edit_user(self, user_email, firstName=None, lastName=None, systemRole=None): + res = self.get_user(user_email) + if res[0] == False: + return res + user = res[1] + reqbody = { + 'systemRole': systemRole if systemRole else user['systemRole'], + 'username': user_email, + 'enabled': user.get('enabled', False), + 'version': user['version'] + } + + if firstName == None: + reqbody['firstName'] = user['firstName'] if 'firstName' in list(user.keys()) else '' + else: + reqbody['firstName'] = firstName + + if lastName == None: + reqbody['lastName'] = user['lastName'] if 'lastName' in list(user.keys()) else '' + else: + reqbody['lastName'] = lastName + + res = requests.put(self.url + '/api/users/' + str(user['id']), headers=self.hdrs, data=json.dumps(reqbody), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, 'Successfully edited user'] + + def get_teams(self, team_filter=''): + '''**Description** + Return the set of teams that match the filter specified. The *team_filter* should be a substring of the names of the teams to be returned. + + **Arguments** + - **team_filter**: the team filter to match when returning the list of teams + + **Success Return Value** + The teams that match the filter. 
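A sketch of inviting and then editing a user; the address is a placeholder and the API token must have Admin rights for these calls:

    from sdcclient import SdMonitorClient   # assumed package-level export

    sdclient = SdMonitorClient('YOUR-ADMIN-API-TOKEN')

    ok, res = sdclient.create_user_invite(
        'jane.doe@example.com', first_name='Jane', last_name='Doe', system_role='ROLE_USER')

    # Later, fix up the last name; unspecified fields keep their current values
    ok, msg = sdclient.edit_user('jane.doe@example.com', lastName='Doe-Smith')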
+ ''' + res = requests.get(self.url + '/api/teams', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + ret = [t for t in res.json()['teams'] if team_filter in t['name']] + return [True, ret] + + def get_team(self, name): + '''**Description** + Return the team with the specified team name, if it is present. + + **Arguments** + - **name**: the name of the team to return + + **Success Return Value** + The requested team. + + **Example** + `examples/user_team_mgmt.py `_ + ''' + res = self.get_teams(name) + if res[0] == False: + return res + for t in res[1]: + if t['name'] == name: + return [True, t] + return [False, 'Could not find team'] + + def get_team_ids(self, teams): + res = requests.get(self.url + '/api/teams', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + u = [x for x in res.json()['teams'] if x['name'] in teams] + return [True, [x['id'] for x in u]] + + def _get_user_id_dict(self, users): + res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + u = [x for x in res.json()['users'] if x['username'] in users] + return [True, dict((user['username'], user['id']) for user in u)] + + def _get_id_user_dict(self, user_ids): + res = requests.get(self.url + '/api/users', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + u = [x for x in res.json()['users'] if x['id'] in user_ids] + return [True, dict((user['id'], user['username']) for user in u)] + + def get_user_ids(self, users): + res = self._get_user_id_dict(users) + if res[0] == False: + return res + else: + return [True, list(res[1].values())] + + def create_team(self, name, memberships=None, filter='', description='', show='host', theme='#7BB0B2', + perm_capture=False, perm_custom_events=False, perm_aws_data=False): + ''' + **Description** + Creates a new team + + **Arguments** + - **name**: the name of the team to create. + - **memberships**: dictionary of (user-name, team-role) pairs that should describe new memberships of the team. + - **filter**: the scope that this team is able to access within Sysdig Monitor. + - **description**: describes the team that will be created. + - **show**: possible values are *host*, *container*. + - **theme**: the color theme that Sysdig Monitor will use when displaying the team. + - **perm_capture**: if True, this team will be allowed to take sysdig captures. + - **perm_custom_events**: if True, this team will be allowed to view all custom events from every user and agent. + - **perm_aws_data**: if True, this team will have access to all AWS metrics and tags, regardless of the team's scope. + + **Success Return Value** + The newly created team. 
+ + **Example** + `examples/user_team_mgmt.py `_ + ''' + reqbody = { + 'name': name, + 'description': description, + 'theme': theme, + 'show': show, + 'canUseSysdigCapture': perm_capture, + 'canUseCustomEvents': perm_custom_events, + 'canUseAwsMetrics': perm_aws_data, + } + + # Map user-names to IDs + if memberships != None and len(memberships) != 0: + res = self._get_user_id_dict(list(memberships.keys())) + if res[0] == False: + return [False, 'Could not fetch IDs for user names'] + reqbody['userRoles'] = [ + { + 'userId': user_id, + 'role': memberships[user_name] + } + for (user_name, user_id) in res[1].items() + ] + else: + reqbody['users'] = [] + + if filter != '': + reqbody['filter'] = filter + + res = requests.post(self.url + '/api/teams', headers=self.hdrs, data=json.dumps(reqbody), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def edit_team(self, name, memberships=None, filter=None, description=None, show=None, theme=None, + perm_capture=None, perm_custom_events=None, perm_aws_data=None): + ''' + **Description** + Edits an existing team. All arguments are optional. Team settings for any arguments unspecified will remain at their current settings. + + **Arguments** + - **name**: the name of the team to edit. + - **memberships**: dictionary of (user-name, team-role) pairs that should describe new memberships of the team. + - **filter**: the scope that this team is able to access within Sysdig Monitor. + - **description**: describes the team that will be created. + - **show**: possible values are *host*, *container*. + - **theme**: the color theme that Sysdig Monitor will use when displaying the team. + - **perm_capture**: if True, this team will be allowed to take sysdig captures. + - **perm_custom_events**: if True, this team will be allowed to view all custom events from every user and agent. + - **perm_aws_data**: if True, this team will have access to all AWS metrics and tags, regardless of the team's scope. + + **Success Return Value** + The edited team. 
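A sketch of creating a team with an initial membership; the team-role string is an assumption, since this diff only shows that roles are passed through verbatim:

    from sdcclient import SdMonitorClient   # assumed package-level export

    sdclient = SdMonitorClient('YOUR-ADMIN-API-TOKEN')

    ok, res = sdclient.create_team(
        'Payments SRE',
        memberships={'jane.doe@example.com': 'ROLE_TEAM_EDIT'},   # role value assumed
        filter="kubernetes.namespace.name = 'payments'",
        description='On-call team for the payments service',
        perm_capture=True)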
+ + **Example** + `examples/user_team_mgmt.py `_ + ''' + res = self.get_team(name) + if res[0] == False: + return res + + t = res[1] + reqbody = { + 'name': name, + 'theme': theme if theme else t['theme'], + 'show': show if show else t['show'], + 'canUseSysdigCapture': perm_capture if perm_capture else t['canUseSysdigCapture'], + 'canUseCustomEvents': perm_custom_events if perm_custom_events else t['canUseCustomEvents'], + 'canUseAwsMetrics': perm_aws_data if perm_aws_data else t['canUseAwsMetrics'], + 'id': t['id'], + 'version': t['version'] + } + + # Handling team description + if description is not None: + reqbody['description'] = description + elif 'description' in list(t.keys()): + reqbody['description'] = t['description'] + + # Handling for users to map (user-name, team-role) pairs to memberships + if memberships != None: + res = self._get_user_id_dict(list(memberships.keys())) + if res[0] == False: + return [False, 'Could not convert user names to IDs'] + reqbody['userRoles'] = [ + { + 'userId': user_id, + 'role': memberships[user_name] + } + for (user_name, user_id) in res[1].items() + ] + elif 'userRoles' in list(t.keys()): + reqbody['userRoles'] = t['userRoles'] + else: + reqbody['userRoles'] = [] + + # Special handling for filters since we don't support blank filters + if filter != None: + reqbody['filter'] = filter + elif 'filter' in list(t.keys()): + reqbody['filter'] = t['filter'] + + res = requests.put(self.url + '/api/teams/' + str(t['id']), headers=self.hdrs, data=json.dumps(reqbody), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def delete_team(self, name): + '''**Description** + Deletes a team from Sysdig Monitor. + + **Arguments** + - **name**: the name of the team that will be deleted from Sysdig Monitor + + **Example** + `examples/user_team_mgmt.py `_ + ''' + res = self.get_team(name) + if res[0] == False: + return res + + t = res[1] + res = requests.delete(self.url + '/api/teams/' + str(t['id']), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, None] + + def list_memberships(self, team): + ''' + **Description** + List all memberships for specified team. + + **Arguments** + - **team**: the name of the team for which we want to see memberships + + **Result** + Dictionary of (user-name, team-role) pairs that should describe memberships of the team. + + **Example** + `examples/user_team_mgmt_extended.py `_ + ''' + res = self.get_team(team) + if res[0] == False: + return res + + raw_memberships = res[1]['userRoles'] + user_ids = [m['userId'] for m in raw_memberships] + + res = self._get_id_user_dict(user_ids) + if res[0] == False: + return [False, 'Could not fetch IDs for user names'] + else: + id_user_dict = res[1] + + return [True, dict([(id_user_dict[m['userId']], m['role']) for m in raw_memberships])] + + def save_memberships(self, team, memberships): + ''' + **Description** + Create new user team memberships or update existing ones. 
+ + **Arguments** + - **team**: the name of the team for which we are creating new memberships + - **memberships**: dictionary of (user-name, team-role) pairs that should describe new memberships + + **Example** + `examples/user_team_mgmt_extended.py `_ + ''' + + res = self.list_memberships(team) + + if res[0] is False: + return res + + full_memberships = res[1] + full_memberships.update(memberships) + + res = self.edit_team(team, full_memberships) + + if res[0] is False: + return res + else: + return [True, None] + + def remove_memberships(self, team, users): + ''' + **Description** + Remove user memberships from specified team. + + **Arguments** + - **team**: the name of the team from which user memberships are removed + - **users**: list of usernames which should be removed from team + + **Example** + `examples/user_team_mgmt_extended.py `_ + ''' + + res = self.list_memberships(team) + + if res[0] is False: + return res + + old_memberships = res[1] + new_memberships = {k: v for k, v in old_memberships.items() if k not in users} + + res = self.edit_team(team, new_memberships) + + if res[0] is False: + return res + else: + return [True, None] + + def get_agents_config(self): + res = requests.get(self.url + '/api/agents/config', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + data = res.json() + return [True, data] + + def set_agents_config(self, config): + res = requests.put(self.url + '/api/agents/config', headers=self.hdrs, data=json.dumps(config), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def clear_agents_config(self): + data = {'files': []} + self.set_agents_config(data) + + def get_user_api_token(self, username, teamname): + res = self.get_team(teamname) + if res[0] == False: + return res + + t = res[1] + + res = requests.get(self.url + '/api/token/%s/%d' % (username, t['id']), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + data = res.json() + return [True, data['token']['key']] diff --git a/sdcclient/_monitor.py b/sdcclient/_monitor.py new file mode 100644 index 00000000..e1ab0f7e --- /dev/null +++ b/sdcclient/_monitor.py @@ -0,0 +1,788 @@ +import json +import copy +import requests + +from sdcclient._common import _SdcCommon + +try: + basestring +except NameError: + basestring = str + + +class SdMonitorClient(_SdcCommon): + + def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True): + super(SdMonitorClient, self).__init__(token, sdc_url, ssl_verify) + + def get_alerts(self): + '''**Description** + Retrieve the list of alerts configured by the user. + + **Success Return Value** + An array of alert dictionaries, with the format described at `this link `__ + + **Example** + `examples/list_alerts.py `_ + ''' + res = requests.get(self.url + '/api/alerts', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def get_notifications(self, from_ts, to_ts, state=None, resolved=None): + '''**Description** + Returns the list of Sysdig Monitor alert notifications. + + **Arguments** + - **from_ts**: filter events by start time. Timestamp format is in UTC (seconds). + - **to_ts**: filter events by start time. Timestamp format is in UTC (seconds). + - **state**: filter events by alert state. Supported values are ``OK`` and ``ACTIVE``. 
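A sketch of the membership helpers above, which read the current memberships, merge or strip entries, and write the result back through edit_team(); names and roles are placeholders:

    from sdcclient import SdMonitorClient   # assumed package-level export

    sdclient = SdMonitorClient('YOUR-ADMIN-API-TOKEN')

    # Add (or update) one membership without touching the others
    ok, res = sdclient.save_memberships(
        'Payments SRE', {'john.ops@example.com': 'ROLE_TEAM_READ'})   # role value assumed

    # Remove it again
    ok, res = sdclient.remove_memberships('Payments SRE', ['john.ops@example.com'])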
+ - **resolved**: filter events by resolution status. Supported values are ``True`` and ``False``. + + **Success Return Value** + A dictionary containing the list of notifications. + + **Example** + `examples/list_alert_notifications.py `_ + ''' + params = {} + + if from_ts is not None: + params['from'] = from_ts * 1000000 + + if to_ts is not None: + params['to'] = to_ts * 1000000 + + if state is not None: + params['state'] = state + + if resolved is not None: + params['resolved'] = resolved + + res = requests.get(self.url + '/api/notifications', headers=self.hdrs, params=params, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def update_notification_resolution(self, notification, resolved): + '''**Description** + Updates the resolution status of an alert notification. + + **Arguments** + - **notification**: notification object as returned by :func:`~SdcClient.get_notifications`. + - **resolved**: new resolution status. Supported values are ``True`` and ``False``. + + **Success Return Value** + The updated notification. + + **Example** + `examples/resolve_alert_notifications.py `_ + ''' + if 'id' not in notification: + return [False, 'Invalid notification format'] + + notification['resolved'] = resolved + data = {'notification': notification} + + res = requests.put(self.url + '/api/notifications/' + str(notification['id']), headers=self.hdrs, data=json.dumps(data), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def create_alert(self, name=None, description=None, severity=None, for_atleast_s=None, condition=None, + segmentby=[], segment_condition='ANY', user_filter='', notify=None, enabled=True, + annotations={}, alert_obj=None): + '''**Description** + Create a threshold-based alert. + + **Arguments** + - **name**: the alert name. This will appear in the Sysdig Monitor UI and in notification emails. + - **description**: the alert description. This will appear in the Sysdig Monitor UI and in notification emails. + - **severity**: syslog-encoded alert severity. This is a number from 0 to 7 where 0 means 'emergency' and 7 is 'debug'. + - **for_atleast_s**: the number of consecutive seconds the condition must be satisfied for the alert to fire. + - **condition**: the alert condition, as described here https://app.sysdigcloud.com/apidocs/#!/Alerts/post_api_alerts + - **segmentby**: a list of Sysdig Monitor segmentation criteria that can be used to apply the alert to multiple entities. For example, segmenting a CPU alert by ['host.mac', 'proc.name'] allows to apply it to any process in any machine. + - **segment_condition**: When *segmentby* is specified (and therefore the alert will cover multiple entities) this field is used to determine when it will fire. In particular, you have two options for *segment_condition*: **ANY** (the alert will fire when at least one of the monitored entities satisfies the condition) and **ALL** (the alert will fire when all of the monitored entities satisfy the condition). + - **user_filter**: a boolean expression combining Sysdig Monitor segmentation criteria that makes it possible to reduce the scope of the alert. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. + - **notify**: the type of notification you want this alert to generate. Options are *EMAIL*, *SNS*, *PAGER_DUTY*, *SYSDIG_DUMP*. + - **enabled**: if True, the alert will be enabled when created. 
+ - **annotations**: an optional dictionary of custom properties that you can associate to this alert for automation or management reasons + - **alert_obj**: an optional fully-formed Alert object of the format returned in an "alerts" list by :func:`~SdcClient.get_alerts` This is an alternative to creating the Alert using the individual parameters listed above. + + **Success Return Value** + A dictionary describing the just created alert, with the format described at `this link `__ + + **Example** + `examples/create_alert.py `_ + ''' + # + # Get the list of alerts from the server + # + res = requests.get(self.url + '/api/alerts', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + res.json() + + if alert_obj is None: + if None in (name, description, severity, for_atleast_s, condition): + return [False, 'Must specify a full Alert object or all parameters: name, description, severity, for_atleast_s, condition'] + else: + # + # Populate the alert information + # + alert_json = { + 'alert': { + 'type': 'MANUAL', + 'name': name, + 'description': description, + 'enabled': enabled, + 'severity': severity, + 'timespan': for_atleast_s * 1000000, + 'condition': condition, + 'filter': user_filter + } + } + + if segmentby != None and segmentby != []: + alert_json['alert']['segmentBy'] = segmentby + alert_json['alert']['segmentCondition'] = {'type': segment_condition} + + if annotations != None and annotations != {}: + alert_json['alert']['annotations'] = annotations + + if notify != None: + alert_json['alert']['notificationChannelIds'] = notify + else: + # The REST API enforces "Alert ID and version must be null", so remove them if present, + # since these would have been there in a dump from the list_alerts.py example. + alert_obj.pop('id', None) + alert_obj.pop('version', None) + alert_json = { + 'alert': alert_obj + } + + # + # Create the new alert + # + res = requests.post(self.url + '/api/alerts', headers=self.hdrs, data=json.dumps(alert_json), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def update_alert(self, alert): + '''**Description** + Update a modified threshold-based alert. + + **Arguments** + - **alert**: one modified alert object of the same format as those in the list returned by :func:`~SdcClient.get_alerts`. + + **Success Return Value** + The updated alert. + + **Example** + `examples/update_alert.py `_ + ''' + if 'id' not in alert: + return [False, "Invalid alert format"] + + res = requests.put(self.url + '/api/alerts/' + str(alert['id']), headers=self.hdrs, data=json.dumps({"alert": alert}), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def delete_alert(self, alert): + '''**Description** + Deletes an alert. + + **Arguments** + - **alert**: the alert dictionary as returned by :func:`~SdcClient.get_alerts`. + + **Success Return Value** + ``None``. + + **Example** + `examples/delete_alert.py `_ + ''' + if 'id' not in alert: + return [False, 'Invalid alert format'] + + res = requests.delete(self.url + '/api/alerts/' + str(alert['id']), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, None] + + def get_explore_grouping_hierarchy(self): + '''**Description** + Return the user's current grouping hierarchy as visible in the Explore tab of Sysdig Monitor. 
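+        A usage sketch for the alert helpers defined above (illustrative; the alert name, condition string and notification channel ID are placeholders):
+
+            from sdcclient import SdMonitorClient
+
+            client = SdMonitorClient(token="<API_TOKEN>")  # placeholder token
+            ok, res = client.create_alert(
+                name='High CPU',
+                description='CPU usage above 80% for 10 minutes',
+                severity=4,
+                for_atleast_s=600,
+                condition='avg(cpu.used.percent) > 80',
+                segmentby=['host.mac'],
+                notify=[1234])  # list of notification channel IDs (placeholder)
+            if not ok:
+                print('Could not create alert:', res)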
+ + **Success Return Value** + A list containing the list of the user's Explore grouping criteria. + + **Example** + `examples/print_explore_grouping.py `_ + ''' + res = requests.get(self.url + '/api/groupConfigurations', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + data = res.json() + + if 'groupConfigurations' not in data: + return [False, 'corrupted groupConfigurations API response'] + + gconfs = data['groupConfigurations'] + + for gconf in gconfs: + if gconf['id'] == 'explore': + res = [] + items = gconf['groups'][0]['groupBy'] + + for item in items: + res.append(item['metric']) + + return [True, res] + + return [False, 'corrupted groupConfigurations API response, missing "explore" entry'] + + def set_explore_grouping_hierarchy(self, new_hierarchy): + '''**Description** + Changes the grouping hierarchy in the Explore panel of the current user. + + **Arguments** + - **new_hierarchy**: a list of sysdig segmentation metrics indicating the new grouping hierarchy. + ''' + body = { + 'id': 'explore', + 'groups': [{'groupBy': []}] + } + + for item in new_hierarchy: + body['groups'][0]['groupBy'].append({'metric': item}) + + res = requests.put(self.url + '/api/groupConfigurations/explore', headers=self.hdrs, + data=json.dumps(body), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + else: + return [True, None] + + def get_views_list(self): + res = requests.get(self.url + '/api/defaultDashboards', headers=self.hdrs, + verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def get_view(self, name): + gvres = self.get_views_list() + if gvres[0] is False: + return gvres + + vlist = gvres[1]['defaultDashboards'] + + id = None + + for v in vlist: + if v['name'] == name: + id = v['id'] + break + + if not id: + return [False, 'view ' + name + ' not found'] + + res = requests.get(self.url + '/api/defaultDashboards/' + id, headers=self.hdrs, + verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def get_dashboards(self): + '''**Description** + Return the list of dashboards available under the given user account. This includes the dashboards created by the user and the ones shared with her by other users. + + **Success Return Value** + A dictionary containing the list of available sampling intervals. + + **Example** + `examples/list_dashboards.py `_ + ''' + res = requests.get(self.url + '/ui/dashboards', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def find_dashboard_by(self, name=None): + '''**Description** + Finds dashboards with the specified name. You can then delete the dashboard (with :func:`~SdcClient.delete_dashboard`) or edit panels (with :func:`~SdcClient.add_dashboard_panel` and :func:`~SdcClient.remove_dashboard_panel`) + + **Arguments** + - **name**: the name of the dashboards to find. + + **Success Return Value** + A list of dictionaries of dashboards matching the specified name. 
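+        A sketch of the Explore grouping helpers above (illustrative; the appended segmentation metric is only an example):
+
+            from sdcclient import SdMonitorClient
+
+            client = SdMonitorClient(token="<API_TOKEN>")  # placeholder token
+            ok, hierarchy = client.get_explore_grouping_hierarchy()
+            if ok:
+                hierarchy.append('kubernetes.namespace.name')
+                client.set_explore_grouping_hierarchy(hierarchy)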
+ + **Example** + `examples/dashboard.py `_ + ''' + res = self.get_dashboards() + if res[0] is False: + return res + else: + def filter_fn(configuration): + return configuration['name'] == name + + def create_item(configuration): + return {'dashboard': configuration} + + dashboards = list(map(create_item, list(filter(filter_fn, res[1]['dashboards'])))) + return [True, dashboards] + + def create_dashboard_with_configuration(self, configuration): + res = requests.post(self.url + '/ui/dashboards', headers=self.hdrs, data=json.dumps({'dashboard': configuration}), + verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + else: + return [True, res.json()] + + def create_dashboard(self, name): + ''' + **Description** + Creates an empty dashboard. You can then add panels by using ``add_dashboard_panel``. + + **Arguments** + - **name**: the name of the dashboard that will be created. + + **Success Return Value** + A dictionary showing the details of the new dashboard. + + **Example** + `examples/dashboard.py `_ + ''' + dashboard_configuration = { + 'name': name, + 'schema': 1, + 'items': [] + } + + # + # Create the new dashboard + # + res = requests.post(self.url + '/ui/dashboards', headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}), + verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + else: + return [True, res.json()] + + def add_dashboard_panel(self, dashboard, name, panel_type, metrics, scope=None, sort_by=None, limit=None, layout=None): + """**Description** + Adds a panel to the dashboard. A panel can be a time series, or a top chart (i.e. bar chart), or a number panel. + + **Arguments** + - **dashboard**: dashboard to edit + - **name**: name of the new panel + - **panel_type**: type of the new panel. Valid values are: ``timeSeries``, ``top``, ``number`` + - **metrics**: a list of dictionaries, specifying the metrics to show in the panel, and optionally, if there is only one metric, a grouping key to segment that metric by. A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and groups of containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. Refer to the examples section below for ready to use code snippets. Note, certain panels allow certain combinations of metrics and grouping keys: + - ``timeSeries``: 1 or more metrics OR 1 metric + 1 grouping key + - ``top``: 1 or more metrics OR 1 metric + 1 grouping key + - ``number``: 1 metric only + - **scope**: filter to apply to the panel; must be based on metadata available in Sysdig Monitor; Example: *kubernetes.namespace.name='production' and container.image='nginx'*. + - **sort_by**: Data sorting; The parameter is optional and it's a dictionary of ``metric`` and ``mode`` (it can be ``desc`` or ``asc``) + - **limit**: This parameter sets the limit on the number of lines/bars shown in a ``timeSeries`` or ``top`` panel. In the case of more entities being available than the limit, the top entities according to the sort will be shown. The default value is 10 for ``top`` panels (for ``timeSeries`` the default is defined by Sysdig Monitor itself). 
Note that increasing the limit above 10 is not officially supported and may cause performance and rendering issues + - **layout**: Size and position of the panel. The dashboard layout is defined by a grid of 12 columns, each row height is equal to the column height. For example, say you want to show 2 panels at the top: one panel might be 6 x 3 (half the width, 3 rows height) located in row 1 and column 1 (top-left corner of the viewport), the second panel might be 6 x 3 located in row 1 and position 7. The location is specified by a dictionary of ``row`` (row position), ``col`` (column position), ``size_x`` (width), ``size_y`` (height). + + **Success Return Value** + A dictionary showing the details of the edited dashboard. + + **Example** + `examples/dashboard.py `_ + """ + panel_configuration = { + 'name': name, + 'showAs': None, + 'showAsType': None, + 'metrics': [], + 'gridConfiguration': { + 'col': 1, + 'row': 1, + 'size_x': 12, + 'size_y': 6 + } + } + + if panel_type == 'timeSeries': + # + # In case of a time series, the current dashboard implementation + # requires the timestamp to be explicitly specified as "key". + # However, this function uses the same abstraction of the data API + # that doesn't require to specify a timestamp key (you only need to + # specify time window and sampling) + # + metrics = copy.copy(metrics) + metrics.insert(0, {'id': 'timestamp'}) + + # + # Convert list of metrics to format used by Sysdig Monitor + # + property_names = {} + k_count = 0 + v_count = 0 + for i, metric in enumerate(metrics): + property_name = 'v' if 'aggregations' in metric else 'k' + + if property_name == 'k': + i = k_count + k_count += 1 + else: + i = v_count + v_count += 1 + property_names[metric['id']] = property_name + str(i) + + panel_configuration['metrics'].append({ + 'metricId': metric['id'], + 'aggregation': metric['aggregations']['time'] if 'aggregations' in metric else None, + 'groupAggregation': metric['aggregations']['group'] if 'aggregations' in metric else None, + 'propertyName': property_name + str(i) + }) + # + # Convert scope to format used by Sysdig Monitor + # + if scope != None: + filter_expressions = scope.strip(' \t\n\r?!.').split(" and ") + filters = [] + + for filter_expression in filter_expressions: + values = filter_expression.strip(' \t\n\r?!.').split("=") + if len(values) != 2: + return [False, "invalid scope format"] + filters.append({ + 'metric': values[0].strip(' \t\n\r?!.'), + 'op': '=', + 'value': values[1].strip(' \t\n\r"?!.'), + 'filters': None + }) + + if len(filters) > 0: + panel_configuration['filter'] = { + 'filters': { + 'logic': 'and', + 'filters': filters + } + } + + # + # Configure panel type + # + if panel_type == 'timeSeries': + panel_configuration['showAs'] = 'timeSeries' + panel_configuration['showAsType'] = 'line' + + if limit != None: + panel_configuration['paging'] = { + 'from': 0, + 'to': limit - 1 + } + + elif panel_type == 'number': + panel_configuration['showAs'] = 'summary' + panel_configuration['showAsType'] = 'summary' + elif panel_type == 'top': + panel_configuration['showAs'] = 'top' + panel_configuration['showAsType'] = 'bars' + + if sort_by is None: + panel_configuration['sorting'] = [{ + 'id': 'v0', + 'mode': 'desc' + }] + else: + panel_configuration['sorting'] = [{ + 'id': property_names[sort_by['metric']], + 'mode': sort_by['mode'] + }] + + if limit is None: + panel_configuration['paging'] = { + 'from': 0, + 'to': 10 + } + else: + panel_configuration['paging'] = { + 'from': 0, + 'to': limit - 1 + } + + # + # Configure 
layout + # + if layout != None: + panel_configuration['gridConfiguration'] = layout + + # + # Clone existing dashboard... + # + dashboard_configuration = copy.deepcopy(dashboard) + dashboard_configuration['id'] = None + + # + # ... and add the new panel + # + dashboard_configuration['items'].append(panel_configuration) + + # + # Update dashboard + # + res = requests.put(self.url + '/ui/dashboards/' + str(dashboard['id']), headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}), + verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + else: + return [True, res.json()] + + def remove_dashboard_panel(self, dashboard, panel_name): + '''**Description** + Removes a panel from the dashboard. The panel to remove is identified by the specified ``name``. + + **Arguments** + - **name**: name of the panel to find and remove + + **Success Return Value** + A dictionary showing the details of the edited dashboard. + + **Example** + `examples/dashboard.py `_ + ''' + # + # Clone existing dashboard... + # + dashboard_configuration = copy.deepcopy(dashboard) + dashboard_configuration['id'] = None + + # + # ... find the panel + # + def filter_fn(panel): + return panel['name'] == panel_name + panels = list(filter(filter_fn, dashboard_configuration['items'])) + + if len(panels) > 0: + # + # ... and remove it + # + for panel in panels: + dashboard_configuration['items'].remove(panel) + + # + # Update dashboard + # + res = requests.put(self.url + '/ui/dashboards/' + str(dashboard['id']), headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}), + verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + else: + return [True, res.json()] + else: + return [False, 'Not found'] + + def create_dashboard_from_template(self, dashboard_name, template, scope, shared=False, public=False, annotations={}): + if scope is not None: + if isinstance(scope, basestring) == False: + return [False, 'Invalid scope format: Expected a string'] + + # + # Clean up the dashboard we retireved so it's ready to be pushed + # + template['id'] = None + template['version'] = None + template['schema'] = 1 + template['name'] = dashboard_name + template['isShared'] = shared + template['isPublic'] = public + template['publicToken'] = None + + # + # set dashboard scope to the specific parameter + # NOTE: Individual panels might override the dashboard scope, the override will NOT be reset + # + template['filterExpression'] = scope + + if 'items' in template: + for chart in template['items']: + if 'overrideFilter' in chart and chart['overrideFilter'] == False: + # patch frontend bug to hide scope override warning even when it's not really overridden + chart['scope'] = scope + + if 'annotations' in template: + template['annotations'].update(annotations) + else: + template['annotations'] = annotations + + template['annotations']['createdByEngine'] = True + + # + # Create the new dashboard + # + res = requests.post(self.url + '/ui/dashboards', headers=self.hdrs, data=json.dumps({'dashboard': template}), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + else: + return [True, res.json()] + + def create_dashboard_from_view(self, newdashname, viewname, filter, shared=False, public=False, annotations={}): + '''**Description** + Create a new dasboard using one of the Sysdig Monitor views as a template. You will be able to define the scope of the new dashboard. 
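+        A sketch tying together the dashboard and panel helpers above (illustrative; the metric IDs, the aggregations and the assumption that the response nests the new dashboard under a 'dashboard' key are examples, not guarantees):
+
+            from sdcclient import SdMonitorClient
+
+            client = SdMonitorClient(token="<API_TOKEN>")  # placeholder token
+            ok, res = client.create_dashboard('Service overview')
+            if ok:
+                dashboard = res['dashboard']  # assumed response wrapping
+                metrics = [
+                    {'id': 'proc.name'},  # grouping key (no aggregations)
+                    {'id': 'cpu.used.percent', 'aggregations': {'time': 'avg', 'group': 'avg'}}
+                ]
+                client.add_dashboard_panel(dashboard, 'CPU by process', 'timeSeries', metrics,
+                                           scope="kubernetes.namespace.name='production'")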
+ + **Arguments** + - **newdashname**: the name of the dashboard that will be created. + - **viewname**: the name of the view to use as the template for the new dashboard. This corresponds to the name that the view has in the Explore page. + - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. + - **shared**: if set to True, the new dashboard will be a shared one. + - **public**: if set to True, the new dashboard will be shared with public token. + - **annotations**: an optional dictionary of custom properties that you can associate to this dashboard for automation or management reasons + + **Success Return Value** + A dictionary showing the details of the new dashboard. + + **Example** + `examples/create_dashboard.py `_ + ''' + # + # Find our template view + # + gvres = self.get_view(viewname) + if gvres[0] is False: + return gvres + + view = gvres[1]['defaultDashboard'] + + view['timeMode'] = {'mode': 1} + view['time'] = {'last': 2 * 60 * 60 * 1000000, 'sampling': 2 * 60 * 60 * 1000000} + + # + # Create the new dashboard + # + return self.create_dashboard_from_template(newdashname, view, filter, shared, public, annotations) + + def create_dashboard_from_dashboard(self, newdashname, templatename, filter, shared=False, public=False, annotations={}): + '''**Description** + Create a new dasboard using one of the existing dashboards as a template. You will be able to define the scope of the new dasboard. + + **Arguments** + - **newdashname**: the name of the dashboard that will be created. + - **viewname**: the name of the dasboard to use as the template, as it appears in the Sysdig Monitor dashboard page. + - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. + - **shared**: if set to True, the new dashboard will be a shared one. + - **public**: if set to True, the new dashboard will be shared with public token. + - **annotations**: an optional dictionary of custom properties that you can associate to this dashboard for automation or management reasons + + **Success Return Value** + A dictionary showing the details of the new dashboard. + + **Example** + `examples/create_dashboard.py `_ + ''' + # + # Get the list of dashboards from the server + # + res = requests.get(self.url + '/ui/dashboards', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + j = res.json() + + # + # Find our template dashboard + # + dboard = None + + for db in j['dashboards']: + if db['name'] == templatename: + dboard = db + break + + if dboard is None: + self.lasterr = 'can\'t find dashboard ' + templatename + ' to use as a template' + return [False, self.lasterr] + + # + # Create the dashboard + # + return self.create_dashboard_from_template(newdashname, dboard, filter, shared, public, annotations) + + def create_dashboard_from_file(self, newdashname, filename, filter, shared=False, public=False, annotations={}): + ''' + **Description** + Create a new dasboard using a dashboard template saved to disk. + + **Arguments** + - **newdashname**: the name of the dashboard that will be created. 
+ - **filename**: name of a file containing a JSON object for a dashboard in the format of an array element returned by :func:`~SdcClient.get_dashboards` + - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*. + - **shared**: if set to True, the new dashboard will be a shared one. + - **public**: if set to True, the new dashboard will be shared with public token. + - **annotations**: an optional dictionary of custom properties that you can associate to this dashboard for automation or management reasons + + **Success Return Value** + A dictionary showing the details of the new dashboard. + + **Example** + `examples/dashboard_save_load.py `_ + ''' + # + # Load the Dashboard + # + with open(filename) as data_file: + dboard = json.load(data_file) + + dboard['timeMode'] = {'mode': 1} + dboard['time'] = {'last': 2 * 60 * 60 * 1000000, 'sampling': 2 * 60 * 60 * 1000000} + + # + # Create the new dashboard + # + return self.create_dashboard_from_template(newdashname, dboard, filter, shared, public, annotations) + + def delete_dashboard(self, dashboard): + '''**Description** + Deletes a dashboard. + + **Arguments** + - **dashboard**: the dashboard object as returned by :func:`~SdcClient.get_dashboards`. + + **Success Return Value** + `None`. + + **Example** + `examples/delete_dashboard.py `_ + ''' + if 'id' not in dashboard: + return [False, "Invalid dashboard format"] + + res = requests.delete(self.url + '/ui/dashboards/' + str(dashboard['id']), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, None] + + def get_metrics(self): + '''**Description** + Return the metric list that can be used for data requests/alerts/dashboards. + + **Success Return Value** + A dictionary containing the list of available metrics. + + **Example** + `examples/list_metrics.py `_ + ''' + res = requests.get(self.url + '/api/data/metrics', headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + +# For backwards compatibility +SdcClient = SdMonitorClient diff --git a/sdcclient/_scanning.py b/sdcclient/_scanning.py new file mode 100644 index 00000000..112d37d7 --- /dev/null +++ b/sdcclient/_scanning.py @@ -0,0 +1,722 @@ +import base64 +import hashlib +import json +import re +import requests +import time + +try: + from urllib.parse import quote_plus, unquote_plus +except ImportError: + from urllib import quote_plus, unquote_plus + +from sdcclient._common import _SdcCommon + + +class SdScanningClient(_SdcCommon): + + def __init__(self, token="", sdc_url='https://secure.sysdig.com', ssl_verify=True): + super(SdScanningClient, self).__init__(token, sdc_url, ssl_verify) + + def add_image(self, image, force=False, dockerfile=None, annotations={}, autosubscribe=True): + '''**Description** + Add an image to the scanner + + **Arguments** + - image: Input image can be in the following formats: registry/repo:tag + - dockerfile: The contents of the dockerfile as a str. + - annotations: A dictionary of annotations {str: str}. + - autosubscribe: Should active the subscription to this image? + + **Success Return Value** + A JSON object representing the image that was added. 
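+        A minimal usage sketch (illustrative; the image tag and token are placeholders):
+
+            from sdcclient import SdScanningClient
+
+            client = SdScanningClient(token="<API_TOKEN>")  # placeholder token
+            ok, res = client.add_image('docker.io/library/alpine:latest')
+            if not ok:
+                print('add_image failed:', res)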
+ ''' + itype = self._discover_inputimage_format(image) + if itype != 'tag': + return [False, "can only add a tag"] + + payload = {} + if dockerfile: + payload['dockerfile'] = base64.b64encode(dockerfile.encode()).decode("utf-8") + payload['tag'] = image + if annotations: + payload['annotations'] = annotations + + url = "{base_url}/api/scanning/v1/anchore/images?autosubscribe={autosubscribe}{force}".format( + base_url=self.url, + autosubscribe=str(autosubscribe), + force="&force=true" if force else "") + + res = requests.post(url, data=json.dumps(payload), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def import_image(self, image_data): + '''**Description** + Import an image from the scanner export + + **Arguments** + - image_data: A JSON with the image information. + + **Success Return Value** + A JSON object representing the image that was imported. + ''' + url = self.url + "/api/scanning/v1/anchore/imageimport" + res = requests.post(url, data=json.dumps(image_data), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def get_image(self, image, show_history=False): + '''**Description** + Find the image with the tag and return its json description + + **Arguments** + - image: Input image can be in the following formats: registry/repo:tag + + **Success Return Value** + A JSON object representing the image. + ''' + itype = self._discover_inputimage_format(image) + if itype not in ['tag', 'imageid', 'imageDigest']: + return [False, "cannot use input image string: no discovered imageDigest"] + + params = {} + params['history'] = str(show_history and itype not in ['imageid', 'imageDigest']).lower() + if itype == 'tag': + params['fulltag'] = image + + url = self.url + "/api/scanning/v1/anchore/images" + url += { + 'imageid': '/by_id/{}'.format(image), + 'imageDigest': '/{}'.format(image) + }.get(itype, '') + + res = requests.get(url, params=params, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def list_images(self): + '''**Description** + List the current set of images in the scanner. + + **Arguments** + - None + + **Success Return Value** + A JSON object containing all the images. + ''' + url = self.url + "/api/scanning/v1/anchore/images" + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def query_image_content(self, image, content_type=""): + '''**Description** + Find the image with the tag and return its content. + + **Arguments** + - image: Input image can be in the following formats: registry/repo:tag + - content_type: The content type can be one of the following types: + - os: Operating System Packages + - npm: Node.JS NPM Module + - gem: Ruby GEM + - files: Files + + **Success Return Value** + A JSON object representing the image content. + ''' + return self._query_image(image, query_group='content', query_type=content_type) + + def query_image_metadata(self, image, metadata_type=""): + '''**Description** + Find the image with the tag and return its metadata. 
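+        For instance (illustrative; assumes the image has already been added to the scanner):
+
+            from sdcclient import SdScanningClient
+
+            client = SdScanningClient(token="<API_TOKEN>")  # placeholder token
+            ok, content = client.query_image_content('docker.io/library/alpine:latest', content_type='os')
+            ok, metadata = client.query_image_metadata('docker.io/library/alpine:latest')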
+ + **Arguments** + - image: Input image can be in the following formats: registry/repo:tag + - metadata_type: The metadata type can be one of the types returned by running without a type specified + + **Success Return Value** + A JSON object representing the image metadata. + ''' + return self._query_image(image, query_group='metadata', query_type=metadata_type) + + def query_image_vuln(self, image, vuln_type="", vendor_only=True): + '''**Description** + Find the image with the tag and return its vulnerabilities. + + **Arguments** + - image: Input image can be in the following formats: registry/repo:tag + - vuln_type: Vulnerability type can be one of the following types: + - os: CVE/distro vulnerabilities against operating system packages + + **Success Return Value** + A JSON object representing the image vulnerabilities. + ''' + return self._query_image(image, query_group='vuln', query_type=vuln_type, vendor_only=vendor_only) + + def _query_image(self, image, query_group="", query_type="", vendor_only=True): + if not query_group: + raise Exception("need to specify a query group") + + _, _, image_digest = self._discover_inputimage(image) + if not image_digest: + return [False, "cannot use input image string (no discovered imageDigest)"] + + url = "{base_url}/api/scanning/v1/anchore/images/{image_digest}/{query_group}/{query_type}{vendor_only}".format( + base_url=self.url, + image_digest=image_digest, + query_group=query_group, + query_type=query_type if query_type else '', + vendor_only="?vendor_only={}".format(vendor_only) if query_group == 'vuln' else '') + + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def delete_image(self, image, force=False): + '''**Description** + Delete image from the scanner. + + **Arguments** + - None + ''' + _, _, image_digest = self._discover_inputimage(image) + if not image_digest: + return [False, "cannot use input image string: no discovered imageDigest"] + + url = self.url + "/api/scanning/v1/anchore/images/" + image_digest + res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def check_image_evaluation(self, image, show_history=False, detail=False, tag=None, policy=None): + '''**Description** + Check the latest policy evaluation for an image + + **Arguments** + - image: Input image can be in the following formats: registry/repo:tag + - show_history: Show all previous policy evaluations + - detail: Show detailed policy evaluation report + - tag: Specify which TAG is evaluated for a given image ID or Image Digest + - policy: Specify which POLICY to use for evaluate (defaults currently active policy) + + **Success Return Value** + A JSON object representing the evaluation status. 
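+        A short sketch combining the vulnerability query above with an evaluation check (illustrative; the image tag is a placeholder):
+
+            from sdcclient import SdScanningClient
+
+            client = SdScanningClient(token="<API_TOKEN>")  # placeholder token
+            ok, vulns = client.query_image_vuln('docker.io/library/alpine:latest', vuln_type='os')
+            ok, status = client.check_image_evaluation('docker.io/library/alpine:latest', detail=True)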
+ ''' + itype, _, image_digest = self._discover_inputimage(image) + if not image_digest: + return [False, "could not get image record from anchore"] + if not tag and itype != 'tag': + return [False, "input image name is not a tag, and no --tag is specified"] + + thetag = tag if tag else image + + url = "{base_url}/api/scanning/v1/anchore/images/{image_digest}/check?history={history}&detail={detail}&tag={tag}{policy_id}" + url = url.format( + base_url=self.url, + image_digest=image_digest, + history=str(show_history).lower(), + detail=str(detail).lower(), + tag=thetag, + policy_id=("&policyId=%s" % policy) if policy else "") + + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def add_registry(self, registry, registry_user, registry_pass, insecure=False, registry_type="docker_v2", validate=True): + '''**Description** + Add image registry + + **Arguments** + - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 + - registry_user: Username + - registry_pass: Password + - insecure: Allow connection to registry without SSL cert checks (ex: if registry uses a self-signed SSL certificate) + - registry_type: Specify the registry type. 'docker_v2' and 'awsecr' are supported (default='docker_v2') + - validate: If set to 'False' will not attempt to validate registry/creds on registry add + + **Success Return Value** + A JSON object representing the registry. + ''' + registry_types = ['docker_v2', 'awsecr'] + if registry_type and registry_type not in registry_types: + return [False, "input registry type not supported (supported registry_types: " + str(registry_types)] + if self._registry_string_is_valid(registry): + return [False, "input registry name cannot contain '/' characters - valid registry names are of the form : where : is optional"] + + if not registry_type: + registry_type = self._get_registry_type(registry) + + payload = { + 'registry': registry, + 'registry_user': registry_user, + 'registry_pass': registry_pass, + 'registry_type': registry_type, + 'registry_verify': not insecure} + url = "{base_url}/api/scanning/v1/anchore/registries?validate={validate}".format( + base_url=self.url, + validate=validate) + + res = requests.post(url, data=json.dumps(payload), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def update_registry(self, registry, registry_user, registry_pass, insecure=False, registry_type="docker_v2", validate=True): + '''**Description** + Update an existing image registry. + + **Arguments** + - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 + - registry_user: Username + - registry_pass: Password + - insecure: Allow connection to registry without SSL cert checks (ex: if registry uses a self-signed SSL certificate) + - registry_type: Specify the registry type. 'docker_v2' and 'awsecr' are supported (default='docker_v2') + - validate: If set to 'False' will not attempt to validate registry/creds on registry add + + **Success Return Value** + A JSON object representing the registry. 
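+        For example (illustrative; hostname and credentials are placeholders):
+
+            from sdcclient import SdScanningClient
+
+            client = SdScanningClient(token="<API_TOKEN>")  # placeholder token
+            ok, res = client.add_registry('myrepo.example.com:5000', 'registry_user', 'registry_password',
+                                          insecure=True, registry_type='docker_v2')
+            if not ok:
+                print('Could not add registry:', res)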
+ ''' + if self._registry_string_is_valid(registry): + return [False, "input registry name cannot contain '/' characters - valid registry names are of the form : where : is optional"] + + payload = { + 'registry': registry, + 'registry_user': registry_user, + 'registry_pass': registry_pass, + 'registry_type': registry_type, + 'registry_verify': not insecure} + url = "{base_url}/api/scanning/v1/anchore/registries/{registry}?validate={validate}".format( + base_url=self.url, + registry=registry, + validate=validate) + + res = requests.put(url, data=json.dumps(payload), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def delete_registry(self, registry): + '''**Description** + Delete an existing image registry + + **Arguments** + - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 + ''' + # do some input string checking + if re.match(".*\\/.*", registry): + return [False, "input registry name cannot contain '/' characters - valid registry names are of the form : where : is optional"] + + url = self.url + "/api/scanning/v1/anchore/registries/" + registry + res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def list_registry(self): + '''**Description** + List all current image registries + + **Arguments** + - None + + **Success Return Value** + A JSON object representing the list of registries. + ''' + url = self.url + "/api/scanning/v1/anchore/registries" + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def get_registry(self, registry): + '''**Description** + Find the registry and return its json description + + **Arguments** + - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 + + **Success Return Value** + A JSON object representing the registry. + ''' + if self._registry_string_is_valid(registry): + return [False, "input registry name cannot contain '/' characters - valid registry names are of the form : where : is optional"] + + url = self.url + "/api/scanning/v1/anchore/registries/" + registry + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def _get_registry_type(self, registry): + if re.match("[0-9]+\\.dkr\\.ecr\\..*\\.amazonaws\\.com", registry): + return "awsecr" + return "docker_v2" + + def _registry_string_is_valid(self, registry): + return re.match(".*\\/.*", registry) + + def add_policy(self, name, rules, comment="", bundleid=None): + '''**Description** + Create a new policy + + **Arguments** + - name: The name of the policy. + - rules: A list of Anchore PolicyRule elements (while creating/updating a policy, new rule IDs will be created backend side) + - comment: A human-readable description. + - bundleid: Target bundle. If not specified, the currently active bundle will be used. + + **Success Return Value** + A JSON object containing the policy description. 
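+        A sketch of creating a policy (illustrative; the rule dictionary below follows the Anchore gate/trigger layout and is an assumption for illustration, not a documented schema):
+
+            from sdcclient import SdScanningClient
+
+            client = SdScanningClient(token="<API_TOKEN>")  # placeholder token
+            rules = [{
+                'gate': 'vulnerabilities',          # assumed Anchore gate name
+                'trigger': 'package',               # assumed trigger name
+                'action': 'STOP',
+                'params': [{'name': 'severity_comparison', 'value': '>='},
+                           {'name': 'severity', 'value': 'high'}]
+            }]
+            ok, res = client.add_policy('Block high CVEs', rules, comment='Stop images with high-severity vulnerabilities')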
+ ''' + policy = { + 'name': name, + 'comment': comment, + 'rules': rules, + 'version': '1_0' + } + if bundleid: + policy['policyBundleId'] = bundleid + + url = self.url + '/api/scanning/v1/policies' + data = json.dumps(policy) + res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def list_policy_bundles(self, detail=False): + url = "{base_url}/api/scanning/v1/anchore/policies?detail={detail}".format( + base_url=self.url, + detail=str(detail)) + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def list_policies(self, bundleid=None): + '''**Description** + List the current set of scanning policies. + + **Arguments** + - bundleid: Target bundle. If not specified, the currently active bundle will be used. + + **Success Return Value** + A JSON object containing the list of policies. + ''' + url = self.url + '/api/scanning/v1/policies' + if bundleid: + url += '?bundleId=' + bundleid + + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def get_policy(self, policyid, bundleid=None): + '''**Description** + Retrieve the policy with the given id in the targeted policy bundle + + **Arguments** + - policyid: Unique identifier associated with this policy. + - bundleid: Target bundle. If not specified, the currently active bundle will be used. + + **Success Return Value** + A JSON object containing the policy description. + ''' + url = self.url + '/api/scanning/v1/policies/' + policyid + if bundleid: + url += '?bundleId=' + bundleid + + def update_policy(self, policyid, policy_description): + '''**Description** + Update the policy with the given id + + **Arguments** + - policyid: Unique identifier associated with this policy. + - policy_description: A dictionary with the policy description. + + **Success Return Value** + A JSON object containing the policy description. + ''' + url = self.url + '/api/scanning/v1/policies/' + policyid + data = json.dumps(policy_description) + res = requests.put(url, headers=self.hdrs, data=data, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def delete_policy(self, policyid, bundleid=None): + '''**Description** + Delete the policy with the given id in the targeted policy Bundle + + **Arguments** + - policyid: Unique identifier associated with this policy. + - policy_description: A dictionary with the policy description. + ''' + url = self.url + '/api/scanning/v1/policies/' + policyid + if bundleid: + url += '?bundleId=' + bundleid + + res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.text] + + def add_alert(self, name, description=None, scope="", triggers={'failed': True, 'unscanned': True}, + enabled=False, notification_channels=[]): + '''**Description** + Create a new alert + + **Arguments** + - name: The name of the alert. + - description: The descprition of the alert. + - scope: An AND-composed string of predicates that selects the scope in which the alert will be applied. 
(like: 'host.domain = "example.com" and container.image != "alpine:latest"') + - tiggers: A dict {str: bool} indicating wich triggers should be enabled/disabled. (default: {'failed': True, 'unscanned': True}) + - enabled: Whether this alert should actually be applied. + - notification_channels: A list of notification channel ids. + + **Success Return Value** + A JSON object containing the alert description. + ''' + alert = { + 'name': name, + 'description': description, + 'triggers': triggers, + 'scope': scope, + 'enabled': enabled, + 'autoscan': True, + 'notificationChannelIds': notification_channels, + } + + url = self.url + '/api/scanning/v1/alerts' + data = json.dumps(alert) + res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def list_alerts(self, limit=None, cursor=None): + '''**Description** + List the current set of scanning alerts. + + **Arguments** + - limit: Maximum number of alerts in the response. + - cursor: An opaque string representing the current position in the list of alerts. It's provided in the 'responseMetadata' of the list_alerts response. + + **Success Return Value** + A JSON object containing the list of alerts. + ''' + url = self.url + '/api/scanning/v1/alerts' + if limit: + url += '?limit=' + str(limit) + if cursor: + url += '&cursor=' + cursor + + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def get_alert(self, alertid): + '''**Description** + Retrieve the scanning alert with the given id + + **Arguments** + - alertid: Unique identifier associated with this alert. + + **Success Return Value** + A JSON object containing the alert description. + ''' + url = self.url + '/api/scanning/v1/alerts/' + alertid + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def update_alert(self, alertid, alert_description): + '''**Description** + Update the alert with the given id + + **Arguments** + - alertid: Unique identifier associated with this alert. + - alert_description: A dictionary with the alert description. + + **Success Return Value** + A JSON object containing the alert description. + ''' + url = self.url + '/api/scanning/v1/alerts/' + alertid + data = json.dumps(alert_description) + res = requests.put(url, headers=self.hdrs, data=data, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def delete_alert(self, policyid): + '''**Description** + Delete the alert with the given id + + **Arguments** + - alertid: Unique identifier associated with this alert. + ''' + url = self.url + '/api/scanning/v1/alerts/' + policyid + res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.text] + + def activate_subscription(self, subscription_type, subscription_key): + '''**Description** + Activate a subscription + + **Arguments** + - subscription_type: Type of subscription. 
Valid options: + - 'tag_update': Receive notification when new image is pushed + - 'policy_eval': Receive notification when image policy status changes + - 'vuln_update': Receive notification when vulnerabilities are added, removed or modified + - subscription_key: Fully qualified name of tag to subscribe to. Eg. docker.io/library/alpine:latest + ''' + return self._update_subscription(subscription_type, subscription_key, True) + + def deactivate_subscription(self, subscription_type, subscription_key): + '''**Description** + Deactivate a subscription + + **Arguments** + - subscription_type: Type of subscription. Valid options: + - 'tag_update': Receive notification when new image is pushed + - 'policy_eval': Receive notification when image policy status changes + - 'vuln_update': Receive notification when vulnerabilities are added, removed or modified + - subscription_key: Fully qualified name of tag to subscribe to. Eg. docker.io/library/alpine:latest + ''' + return self._update_subscription(subscription_type, subscription_key, False) + + def _update_subscription(self, subscription_type, subscription_key, activate): + hashstr = '+'.join([self.token, subscription_key, subscription_type]).encode('utf-8') + subscription_id = hashlib.md5(hashstr).hexdigest() + url = self.url + "/api/scanning/v1/anchore/subscriptions/" + subscription_id + payload = {'active': activate, 'subscription_key': subscription_key, 'subscription_type': subscription_type} + + res = requests.put(url, data=json.dumps(payload), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def list_subscription(self): + '''**Description** + List all subscriptions + + **Arguments** + - None + + **Success Return Value** + A JSON object representing the list of subscriptions. + ''' + url = self.url + "/api/scanning/v1/anchore/subscriptions" + res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def list_runtime(self, scope="", skip_policy_evaluation=True, start_time=None, end_time=None): + '''**Description** + List runtime containers + + **Arguments** + - scope: An AND-composed string of predicates that selects the scope in which the alert will be applied. (like: 'host.domain = "example.com" and container.image != "alpine:latest"') + - skip_policy_evaluation: If true, no policy evaluations will be triggered for the images. + - start_time: Start of the time range (integer of unix time). + - end_time: End of the time range (integer of unix time). + + **Success Return Value** + A JSON object representing the list of runtime containers. 
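+        A minimal sketch (illustrative; the subscription tag and scope string are placeholders):
+
+            import time
+            from sdcclient import SdScanningClient
+
+            client = SdScanningClient(token="<API_TOKEN>")  # placeholder token
+            client.activate_subscription('tag_update', 'docker.io/library/alpine:latest')
+            ok, res = client.list_runtime(scope='container.image != "alpine:latest"',
+                                          start_time=time.time() - 3600)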
+ ''' + containers = { + 'scope': scope, + 'skipPolicyEvaluation': skip_policy_evaluation + } + if start_time or end_time: + containers['time'] = {} + containers['time']['from'] = int(start_time * 100000) if start_time else 0 + end_time = end_time if end_time else time.time() + containers['time']['to'] = int(end_time * 1000000) + + url = self.url + '/api/scanning/v1/query/containers' + data = json.dumps(containers) + res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + + return [True, res.json()] + + def _discover_inputimage_format(self, input_string): + itype = None + + if re.match("^sha256:[0-9a-fA-F]{64}", input_string): + itype = 'imageDigest' + elif re.match("[0-9a-fA-F]{64}", input_string): + itype = 'imageid' + else: + itype = 'tag' + + return itype + + def _discover_inputimage(self, input_string): + patt = re.match(".*(sha256:.*)", input_string) + if patt: + urldigest = quote_plus(patt.group(1)) + return "digest", input_string, urldigest + + try: + digest = unquote_plus(str(input_string)) + for tpe in ["sha256", "local"]: + patt = re.match(".*({}:.*)".format(tpe), digest) + if patt: + return "imageDigest", input_string, input_string + except Exception: + pass + + urldigest = None + ret_type = "tag" + ok, ret = self.get_image(input_string) + if ok: + image_record = ret[0] + urldigest = image_record.get('imageDigest', None) + for image_detail in image_record.get('image_detail', []): + if input_string == image_detail.get('imageId', ''): + ret_type = "imageid" + break + + return ret_type, input_string, urldigest diff --git a/sdcclient/_secure.py b/sdcclient/_secure.py new file mode 100644 index 00000000..ab6fc759 --- /dev/null +++ b/sdcclient/_secure.py @@ -0,0 +1,680 @@ +import json +import datetime +import requests +import shutil + +from sdcclient._common import _SdcCommon + + +class SdSecureClient(_SdcCommon): + + def __init__(self, token="", sdc_url='https://secure.sysdig.com', ssl_verify=True): + super(SdSecureClient, self).__init__(token, sdc_url, ssl_verify) + + self.customer_id = None + + def _get_falco_rules(self, kind): + res = requests.get(self.url + '/api/settings/falco/{}RulesFile'.format(kind), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + data = res.json() + return [True, data] + + def get_system_falco_rules(self): + '''**Description** + Get the system falco rules file in use for this customer. See the `Falco wiki `_ for documentation on the falco rules format. + + **Arguments** + - None + + **Success Return Value** + The contents of the system falco rules file. + + **Example** + `examples/get_secure_system_falco_rules.py `_ + ''' + + return self._get_falco_rules("system") + + def get_user_falco_rules(self): + '''**Description** + Get the user falco rules file in use for this customer. See the `Falco wiki `_ for documentation on the falco rules format. + + **Arguments** + - None + + **Success Return Value** + The contents of the user falco rules file. 
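+        A read/modify/write sketch using the user rules helpers (illustrative; the appended snippet is a placeholder, not a validated falco rule, and the response is assumed to nest the file under 'userRulesFile'):
+
+            from sdcclient import SdSecureClient
+
+            client = SdSecureClient(token="<API_TOKEN>")  # placeholder token
+            ok, res = client.get_user_falco_rules()
+            if ok:
+                content = res['userRulesFile']['content'] + '\n# additional rules here\n'
+                client.set_user_falco_rules(content)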
+ + **Example** + `examples/get_secure_user_falco_rules.py `_ + ''' + return self._get_falco_rules("user") + + def _set_falco_rules(self, kind, rules_content): + payload = self._get_falco_rules(kind) + + if not payload[0]: + return payload + + payload[1]["{}RulesFile".format(kind)]["content"] = rules_content # pylint: disable=unsubscriptable-object + + res = requests.put(self.url + '/api/settings/falco/{}RulesFile'.format(kind), headers=self.hdrs, data=json.dumps(payload[1]), verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + return [True, res.json()] + + def set_system_falco_rules(self, rules_content): + '''**Description** + Set the system falco rules file in use for this customer. NOTE: This API endpoint can *only* be used in on-premise deployments. Generally the system falco rules file is only modified in conjunction with Sysdig support. See the `Falco wiki `_ for documentation on the falco rules format. + + **Arguments** + - A string containing the system falco rules. + + **Success Return Value** + The contents of the system falco rules file that were just updated. + + **Example** + `examples/set_secure_system_falco_rules.py `_ + + ''' + return self._set_falco_rules("system", rules_content) + + def set_user_falco_rules(self, rules_content): + '''**Description** + Set the user falco rules file in use for this customer. See the `Falco wiki `_ for documentation on the falco rules format. + + **Arguments** + - A string containing the user falco rules. + + **Success Return Value** + The contents of the user falco rules file that were just updated. + + **Example** + `examples/set_secure_user_falco_rules.py `_ + + ''' + return self._set_falco_rules("user", rules_content) + + # Only one kind for now called "default", but might add a "custom" kind later. + def _get_falco_rules_files(self, kind): + + res = requests.get(self.url + '/api/settings/falco/{}RulesFiles'.format(kind), headers=self.hdrs, verify=self.ssl_verify) + if not self._checkResponse(res): + return [False, self.lasterr] + data = res.json() + + return [True, data] + + def get_default_falco_rules_files(self): + '''**Description** + Get the set of falco rules files from the backend. The _files programs and endpoints are a + replacement for the system_file endpoints and allow for publishing multiple files instead + of a single file as well as publishing multiple variants of a given file that are compatible + with different agent versions. + + **Arguments** + - None + + **Success Return Value** + A dict with the following keys: + - tag: A string used to uniquely identify this set of rules. It is recommended that this tag change every time the set of rules is updated. + - files: An array of dicts. 
Each dict has the following keys: + - name: the name of the file + - variants: An array of dicts with the following keys: + - requiredEngineVersion: the minimum falco engine version that can read this file + - content: the falco rules content + An example would be: + {'tag': 'v1.5.9', + 'files': [ + { + 'name': 'falco_rules.yaml', + 'variants': [ + { + 'content': '- required_engine_version: 29\n\n- list: foo\n', + 'requiredEngineVersion': 29 + }, + { + 'content': '- required_engine_version: 1\n\n- list: foo\n', + 'requiredEngineVersion': 1 + } + ] + }, + { + 'name': 'k8s_audit_rules.yaml', + 'variants': [ + { + 'content': '# some comment\n', + 'requiredEngineVersion': 0 + } + ] + } + ] + } + + **Example** + `examples/get_default_falco_rules_files.py `_ + ''' + + res = self._get_falco_rules_files("default") + + if not res[0]: + return res + else: + res_obj = res[1]["defaultFalcoRulesFiles"] + + # Copy only the tag and files over + ret = {} + + if "tag" in res_obj: + ret["tag"] = res_obj["tag"] + + if "files" in res_obj: + ret["files"] = res_obj["files"] + + return [True, ret] + + def save_default_falco_rules_files(self, fsobj, save_dir): + '''**Description** + Given a dict returned from get_default_falco_rules_files, save those files to a set of files below save_dir. + The first level below save_dir is a directory with the tag name. The second level is a directory per file. + The third level is a directory per variant. Finally the files are at the lowest level, in a file called "content". + For example, using the example dict in get_default_falco_rules_files(), the directory layout would look like: + save_dir/ + v1.5.9/ + falco_rules.yaml/ + 29/ + content: a file containing "- required_engine_version: 29\n\n- list: foo\n" + 1/ + content: a file containing "- required_engine_version: 1\n\n- list: foo\n" + k8s_audit_rules.yaml/ + 0/ + content: a file containing "# some comment" + **Arguments** + - fsobj: a python dict matching the structure returned by get_default_falco_rules_files() + - save_dir: a directory path under which to save the files. If the path already exists, it will be removed first. + + **Success Return Value** + - None + + **Example** + `examples/get_default_falco_rules_files.py `_ + ''' + if os.path.exists(save_dir): + try: + if os.path.isdir(save_dir): + shutil.rmtree(save_dir) + else: + os.unlink(save_dir) + except Exception as e: + return [False, "Could not remove existing save dir {}: {}".format(save_dir, str(e))] + + prefix = os.path.join(save_dir, fsobj["tag"]) + try: + os.makedirs(prefix) + except Exception as e: + return [False, "Could not create tag directory {}: {}".format(prefix, str(e))] + + if "files" in fsobj: + for fobj in fsobj["files"]: + fprefix = os.path.join(prefix, fobj["name"]) + try: + os.makedirs(fprefix) + except Exception as e: + return [False, "Could not create file directory {}: {}".format(fprefix, str(e))] + for variant in fobj["variants"]: + vprefix = os.path.join(fprefix, str(variant["requiredEngineVersion"])) + try: + os.makedirs(vprefix) + except Exception as e: + return [False, "Could not create variant directory {}: {}".format(vprefix, str(e))] + cpath = os.path.join(vprefix, "content") + try: + with open(cpath, "w") as cfile: + cfile.write(variant["content"]) + except Exception as e: + return [False, "Could not write content to {}: {}".format(cfile, str(e))] + + return [True, None] + + # Only One kind for now, but might add a "custom" kind later. 
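+    # A usage sketch for the two helpers above (illustrative; the token and target
+    # directory are placeholders):
+    #
+    #     from sdcclient import SdSecureClient
+    #
+    #     client = SdSecureClient(token="<API_TOKEN>")
+    #     ok, files = client.get_default_falco_rules_files()
+    #     if ok:
+    #         client.save_default_falco_rules_files(files, '/tmp/falco-rules')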
+
+    # Only one kind for now, but might add a "custom" kind later.
+    def _set_falco_rules_files(self, kind, rules_files):
+
+        payload = self._get_falco_rules_files(kind)
+
+        if not payload[0]:
+            return payload
+
+        obj = payload[1]["{}FalcoRulesFiles".format(kind)]  # pylint: disable=unsubscriptable-object
+
+        obj["tag"] = rules_files["tag"]
+        obj["files"] = rules_files["files"]
+
+        res = requests.put(self.url + '/api/settings/falco/{}RulesFiles'.format(kind), headers=self.hdrs, data=json.dumps(payload[1]), verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+        return [True, res.json()]
+
+    def set_default_falco_rules_files(self, rules_files):
+        '''**Description**
+            Update the set of falco rules files to the provided set of files. See the `Falco wiki `_ for documentation on the falco rules format.
+            The _files programs and endpoints are a replacement for the system_file endpoints and
+            allow for publishing multiple files instead of a single file as well as publishing
+            multiple variants of a given file that are compatible with different agent versions.
+
+        **Arguments**
+            - rules_files: a dict with the same structure as returned by get_default_falco_rules_files.
+
+        **Success Return Value**
+            The contents of the default falco rules files that were just updated.
+
+        **Example**
+            `examples/set_default_falco_rules_files.py `_
+
+        '''
+
+        return self._set_falco_rules_files("default", rules_files)
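A sketch of the reverse round trip: reload a directory tree previously written by `save_default_falco_rules_files()` (possibly edited by hand) and publish it back as the new default rules set. It reuses the client and imports from the earlier sketches, and `default_rules` is the same placeholder directory.

```python
# Load <save_dir>/<tag>/<file>/<engine version>/content back into a dict
# and push it as the new default falco rules files.
ok, files = client.load_default_falco_rules_files("default_rules")
if not ok:
    sys.exit("Error: " + files)

ok, res = client.set_default_falco_rules_files(files)
if not ok:
    sys.exit("Error: " + res)
print("Default falco rules files updated to tag " + files["tag"])
```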
+
+    def load_default_falco_rules_files(self, save_dir):
+        '''**Description**
+            Given a file and directory layout as described in save_default_falco_rules_files(), load those files and
+            return a dict representing the contents. This dict is suitable for passing to set_default_falco_rules_files().
+
+        **Arguments**
+            - save_dir: a directory path from which to load the files.
+
+        **Success Return Value**
+            - A dict matching the format described in get_default_falco_rules_files.
+
+        **Example**
+            `examples/set_default_falco_rules_files.py `_
+        '''
+
+        tags = os.listdir(save_dir)
+        if len(tags) != 1:
+            return [False, "Directory {} did not contain exactly 1 entry".format(save_dir)]
+
+        tpath = os.path.join(save_dir, tags[0])
+
+        if not os.path.isdir(tpath):
+            return [False, "Tag path {} is not a directory".format(tpath)]
+
+        ret = {"tag": os.path.basename(tpath), "files": []}
+
+        for fdir in os.listdir(tpath):
+            fpath = os.path.join(tpath, fdir)
+            if not os.path.isdir(fpath):
+                return [False, "File path {} is not a directory".format(fpath)]
+            fobj = {"name": os.path.basename(fpath), "variants": []}
+            for vdir in os.listdir(fpath):
+                vpath = os.path.join(fpath, vdir)
+                if not os.path.isdir(vpath):
+                    return [False, "Variant path {} is not a directory".format(vpath)]
+                cpath = os.path.join(vpath, "content")
+                try:
+                    with open(cpath, 'r') as content_file:
+                        try:
+                            required_engine_version = int(os.path.basename(vpath))
+                            # Compare the parsed engine version, not the path string.
+                            if required_engine_version < 0:
+                                return [False, "Variant directory {} must be a non-negative number".format(vpath)]
+                            fobj["variants"].append({
+                                "requiredEngineVersion": required_engine_version,
+                                "content": content_file.read()
+                            })
+                        except ValueError:
+                            return [False, "Variant directory {} must be a number".format(vpath)]
+                except Exception as e:
+                    return [False, "Could not read content at {}: {}".format(cpath, str(e))]
+
+            ret["files"].append(fobj)
+
+        return [True, ret]
+
+    def _get_policy_events_int(self, ctx):
+        policy_events_url = self.url + '/api/policyEvents?from={:d}&to={:d}&offset={}&limit={}'.format(int(ctx['from']), int(ctx['to']), ctx['offset'], ctx['limit'])
+        if 'sampling' in ctx:
+            policy_events_url += '&sampling={:d}'.format(int(ctx['sampling']))
+
+        res = requests.get(policy_events_url, headers=self.hdrs, verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        # Increment the offset by limit
+        ctx['offset'] += ctx['limit']
+
+        return [True, {"ctx": ctx, "data": res.json()}]
+
+    def get_policy_events_range(self, from_sec, to_sec, sampling=None):
+        '''**Description**
+            Fetch all policy events that occurred in the time range [from_sec:to_sec]. This method is used in conjunction
+            with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events.
+
+        **Arguments**
+            - from_sec: the start of the time range for which to get events
+            - to_sec: the end of the time range for which to get events
+            - sampling: sample all policy events using *sampling* interval.
+
+        **Success Return Value**
+            An array containing:
+            - A context object that should be passed to later calls to get_more_policy_events.
+            - An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events`
+              for details on the contents of policy events.
+
+        **Example**
+            `examples/get_secure_policy_events.py `_
+
+        '''
+        ctx = {"from": int(from_sec) * 1000000,
+               "to": int(to_sec) * 1000000,
+               "offset": 0,
+               "limit": 1000}
+
+        if sampling is not None:
+            ctx["sampling"] = sampling
+
+        return self._get_policy_events_int(ctx)
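A sketch of the pagination loop these methods are designed for, reusing the client from the earlier sketches. The `policyEvents` key used to pull the event list out of each batch is my assumption about the payload shape (the docstrings only describe the per-event fields), so it may need adjusting.

```python
# Page through all policy events from the last hour. The returned context
# carries the offset for the next request; an empty batch means we are done.
ok, res = client.get_policy_events_duration(3600)
while ok:
    # "policyEvents" is an assumed key in the returned data payload.
    events = res["data"].get("policyEvents", [])
    if not events:
        break
    for event in events:
        print("{} policy={} output={}".format(event["timestamp"], event["policyId"], event["output"]))
    ok, res = client.get_more_policy_events(res["ctx"])
```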
+
+    def get_policy_events_duration(self, duration_sec, sampling=None):
+        '''**Description**
+            Fetch all policy events that occurred in the last duration_sec seconds. This method is used in conjunction with
+            :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events.
+
+        **Arguments**
+            - duration_sec: Fetch all policy events that have occurred in the last *duration_sec* seconds.
+            - sampling: Sample all policy events using *sampling* interval.
+
+        **Success Return Value**
+            An array containing:
+            - A context object that should be passed to later calls to get_more_policy_events.
+            - An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events`
+              for details on the contents of policy events.
+
+        **Example**
+            `examples/get_secure_policy_events.py `_
+
+        '''
+        epoch = datetime.datetime.utcfromtimestamp(0)
+
+        to_ts = (datetime.datetime.utcnow() - epoch).total_seconds() * 1000 * 1000
+        from_ts = to_ts - (int(duration_sec) * 1000 * 1000)
+        ctx = {"to": to_ts,
+               "from": from_ts,
+               "offset": 0,
+               "limit": 1000}
+
+        if sampling is not None:
+            ctx["sampling"] = sampling
+
+        return self._get_policy_events_int(ctx)
+
+    def get_more_policy_events(self, ctx):
+        '''**Description**
+            Fetch additional policy events after an initial call to :func:`~sdcclient.SdSecureClient.get_policy_events_range` /
+            :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to get_more_policy_events.
+
+        **Arguments**
+            - ctx: a context object returned from an initial call to :func:`~sdcclient.SdSecureClient.get_policy_events_range` /
+              :func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to get_more_policy_events.
+
+        **Success Return Value**
+            An array containing:
+            - A context object that should be passed to later calls to get_more_policy_events()
+            - An array of policy events, in JSON format. Each policy event contains the following:
+              - hostMac: the mac address of the machine where the event occurred
+              - severity: a severity level from 1-7
+              - timestamp: when the event occurred (ns since the epoch)
+              - version: a version number for this message (currently 1)
+              - policyId: a reference to the policy that generated this policy event
+              - output: A string describing the event that occurred
+              - id: a unique identifier for this policy event
+              - isAggregated: if true, this is a combination of multiple policy events
+              - containerId: the container in which the policy event occurred
+
+            When the number of policy events returned is 0, there are no remaining events and you can stop calling get_more_policy_events().
+
+        **Example**
+            `examples/get_secure_policy_events.py `_
+        '''
+        return self._get_policy_events_int(ctx)
+
+    def create_default_policies(self):
+        '''**Description**
+            Create a set of default policies using the current system falco rules file as a reference. For every falco rule in the system
+            falco rules file, one policy will be created. The policy will take the name and description from the name and description of
+            the corresponding falco rule. If a policy already exists with the same name, no policy is added or modified. Existing
+            policies will be unchanged.
+
+        **Arguments**
+            - None
+
+        **Success Return Value**
+            JSON containing details on any new policies that were added.
+
+        **Example**
+            `examples/create_default_policies.py `_
+
+        '''
+        res = requests.post(self.url + '/api/policies/createDefault', headers=self.hdrs, verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        return [True, res.json()]
+
+    def delete_all_policies(self):
+        '''**Description**
+            Delete all existing policies. The falco rules file is unchanged.
+
+        **Arguments**
+            - None
+
+        **Success Return Value**
+            The string "Policies Deleted"
+
+        **Example**
+            `examples/delete_all_policies.py `_
+
+        '''
+        res = requests.post(self.url + '/api/policies/deleteAll', headers=self.hdrs, verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        return [True, "Policies Deleted"]
+
+    def list_policies(self):
+        '''**Description**
+            List the current set of policies.
+
+        **Arguments**
+            - None
+
+        **Success Return Value**
+            A JSON object containing the number of policies and the details of each policy.
+
+        **Example**
+            `examples/list_policies.py `_
+
+        '''
+        res = requests.get(self.url + '/api/policies', headers=self.hdrs, verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        return [True, res.json()]
+
+    def get_policy_priorities(self):
+        '''**Description**
+            Get a list of policy ids in the order they will be evaluated.
+
+        **Arguments**
+            - None
+
+        **Success Return Value**
+            A JSON object representing the list of policy ids.
+
+        **Example**
+            `examples/list_policies.py `_
+
+        '''
+
+        res = requests.get(self.url + '/api/policies/priorities', headers=self.hdrs, verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        return [True, res.json()]
+
+    def set_policy_priorities(self, priorities_json):
+        '''**Description**
+            Change the policy evaluation order.
+
+        **Arguments**
+            - priorities_json: a description of the new policy order.
+
+        **Success Return Value**
+            A JSON object representing the updated list of policy ids.
+
+        **Example**
+            `examples/set_policy_order.py `_
+
+        '''
+
+        try:
+            json.loads(priorities_json)
+        except Exception as e:
+            return [False, "priorities json is not valid json: {}".format(str(e))]
+
+        res = requests.put(self.url + '/api/policies/priorities', headers=self.hdrs, data=priorities_json, verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        return [True, res.json()]
+
+    def get_policy(self, name):
+        '''**Description**
+            Find the policy with name and return its json description.
+
+        **Arguments**
+            - name: the name of the policy to fetch
+
+        **Success Return Value**
+            A JSON object containing the description of the policy. If there is no policy with
+            the given name, returns False.
+
+        **Example**
+            `examples/get_policy.py `_
+
+        '''
+        res = requests.get(self.url + '/api/policies', headers=self.hdrs, verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        policies = res.json()["policies"]
+
+        # Find the policy with the given name and return it.
+        for policy in policies:
+            if policy["name"] == name:
+                return [True, policy]
+
+        return [False, "No policy with name {}".format(name)]
+
+    def add_policy(self, policy_json):
+        '''**Description**
+            Add a new policy using the provided json.
+
+        **Arguments**
+            - policy_json: a description of the new policy
+
+        **Success Return Value**
+            The JSON object describing the newly added policy.
+
+        **Example**
+            `examples/add_policy.py `_
+
+        '''
+
+        try:
+            policy_obj = json.loads(policy_json)
+        except Exception as e:
+            return [False, "policy json is not valid json: {}".format(str(e))]
+
+        body = {"policy": policy_obj}
+        res = requests.post(self.url + '/api/policies', headers=self.hdrs, data=json.dumps(body), verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        return [True, res.json()]
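A sketch of adding and then updating a policy, continuing with the client from the earlier sketches. Both calls take a JSON *string* rather than a dict, and update_policy() requires the 'id' field; the policy fields shown below are illustrative only, since a real policy carries additional backend-defined fields.

```python
import json
import sys

# Illustrative policy payload; not a complete policy definition.
new_policy = {
    "name": "my-policy",
    "description": "Created from the Python client",
}
ok, res = client.add_policy(json.dumps(new_policy))
if not ok:
    sys.exit("Error: " + res)

# To update, fetch the stored policy (which includes its 'id'),
# modify it, and send the whole object back as a JSON string.
ok, policy = client.get_policy("my-policy")
if ok:
    policy["description"] = "Updated description"
    ok, res = client.update_policy(json.dumps(policy))
```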
+
+    def update_policy(self, policy_json):
+        '''**Description**
+            Update an existing policy using the provided json. The 'id' field from the policy is
+            used to determine which policy to update.
+
+        **Arguments**
+            - policy_json: the updated description of the policy
+
+        **Success Return Value**
+            The JSON object describing the updated policy.
+
+        **Example**
+            `examples/update_policy.py `_
+
+        '''
+
+        try:
+            policy_obj = json.loads(policy_json)
+        except Exception as e:
+            return [False, "policy json is not valid json: {}".format(str(e))]
+
+        if "id" not in policy_obj:
+            return [False, "Policy Json does not have an 'id' field"]
+
+        body = {"policy": policy_obj}
+
+        res = requests.put(self.url + '/api/policies/{}'.format(policy_obj["id"]), headers=self.hdrs, data=json.dumps(body), verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        return [True, res.json()]
+
+    def delete_policy_name(self, name):
+        '''**Description**
+            Delete the policy with the given name.
+
+        **Arguments**
+            - name: the name of the policy to delete
+
+        **Success Return Value**
+            The JSON object representing the now-deleted policy.
+
+        **Example**
+            `examples/delete_policy.py `_
+
+        '''
+        res = requests.get(self.url + '/api/policies', headers=self.hdrs, verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        # Find the policy with the given name and delete it.
+        for policy in res.json()["policies"]:
+            if policy["name"] == name:
+                return self.delete_policy_id(policy["id"])
+
+        return [False, "No policy with name {}".format(name)]
+
+    def delete_policy_id(self, id):
+        '''**Description**
+            Delete the policy with the given id.
+
+        **Arguments**
+            - id: the id of the policy to delete
+
+        **Success Return Value**
+            The JSON object representing the now-deleted policy.
+
+        **Example**
+            `examples/delete_policy.py `_
+
+        '''
+        res = requests.delete(self.url + '/api/policies/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify)
+        if not self._checkResponse(res):
+            return [False, self.lasterr]
+
+        return [True, res.json()]
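Finally, a sketch of listing policies and cleaning up by name, again reusing the client from the earlier sketches; "my-policy" is the placeholder name used in the add/update sketch above.

```python
# List the current policies and delete one by name.
ok, res = client.list_policies()
if ok:
    print("{} policies defined".format(len(res["policies"])))

ok, res = client.delete_policy_name("my-policy")
if not ok:
    # delete_policy_name() reports an error if no policy has that name;
    # delete_policy_id() can be used when the numeric id is already known.
    print("Error: " + res)
```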