diff --git a/monitoring/api/v3/alerts-client/snippets.py b/monitoring/api/v3/alerts-client/snippets.py
index c56c2fcab20..d6d8b4106f7 100644
--- a/monitoring/api/v3/alerts-client/snippets.py
+++ b/monitoring/api/v3/alerts-client/snippets.py
@@ -27,9 +27,14 @@ def list_alert_policies(project_name):
     client = monitoring_v3.AlertPolicyServiceClient()
     policies = client.list_alert_policies(project_name)
-    print(tabulate.tabulate(
-        [(policy.name, policy.display_name) for policy in policies],
-        ('name', 'display_name')))
+    print(
+        tabulate.tabulate(
+            [(policy.name, policy.display_name) for policy in policies],
+            ("name", "display_name"),
+        )
+    )
+
+
 # [END monitoring_alert_list_policies]
 
 
 # [START monitoring_alert_list_channels]
@@ -37,9 +42,14 @@ def list_notification_channels(project_name):
     client = monitoring_v3.NotificationChannelServiceClient()
     channels = client.list_notification_channels(project_name)
-    print(tabulate.tabulate(
-        [(channel.name, channel.display_name) for channel in channels],
-        ('name', 'display_name')))
+    print(
+        tabulate.tabulate(
+            [(channel.name, channel.display_name) for channel in channels],
+            ("name", "display_name"),
+        )
+    )
+
+
 # [END monitoring_alert_list_channels]
@@ -60,20 +70,26 @@ def enable_alert_policies(project_name, enable, filter_=None):
 
     for policy in policies:
         if bool(enable) == policy.enabled.value:
-            print('Policy', policy.name, 'is already',
-                  'enabled' if policy.enabled.value else 'disabled')
+            print(
+                "Policy",
+                policy.name,
+                "is already",
+                "enabled" if policy.enabled.value else "disabled",
+            )
         else:
             policy.enabled.value = bool(enable)
             mask = monitoring_v3.types.field_mask_pb2.FieldMask()
-            mask.paths.append('enabled')
+            mask.paths.append("enabled")
             client.update_alert_policy(policy, mask)
-            print('Enabled' if enable else 'Disabled', policy.name)
+            print("Enabled" if enable else "Disabled", policy.name)
+
+
 # [END monitoring_alert_enable_policies]
 
 
 # [START monitoring_alert_replace_channels]
 def replace_notification_channels(project_name, alert_policy_id, channel_ids):
-    _, project_id = project_name.split('/')
+    _, project_id = project_name.split("/")
     alert_client = monitoring_v3.AlertPolicyServiceClient()
     channel_client = monitoring_v3.NotificationChannelServiceClient()
     policy = monitoring_v3.types.alert_pb2.AlertPolicy()
@@ -81,12 +97,15 @@ def replace_notification_channels(project_name, alert_policy_id, channel_ids):
 
     for channel_id in channel_ids:
         policy.notification_channels.append(
-            channel_client.notification_channel_path(project_id, channel_id))
+            channel_client.notification_channel_path(project_id, channel_id)
+        )
 
     mask = monitoring_v3.types.field_mask_pb2.FieldMask()
-    mask.paths.append('notification_channels')
+    mask.paths.append("notification_channels")
     updated_policy = alert_client.update_alert_policy(policy, mask)
-    print('Updated', updated_policy.name)
+    print("Updated", updated_policy.name)
+
+
 # [END monitoring_alert_replace_channels]
@@ -94,16 +113,16 @@ def replace_notification_channels(project_name, alert_policy_id, channel_ids):
 def delete_notification_channels(project_name, channel_ids, force=None):
     channel_client = monitoring_v3.NotificationChannelServiceClient()
     for channel_id in channel_ids:
-        channel_name = '{}/notificationChannels/{}'.format(
-            project_name, channel_id)
+        channel_name = "{}/notificationChannels/{}".format(project_name, channel_id)
         try:
-            channel_client.delete_notification_channel(
-                channel_name, force=force)
-            print('Channel {} deleted'.format(channel_name))
+            channel_client.delete_notification_channel(channel_name, force=force)
+            print("Channel {} deleted".format(channel_name))
         except ValueError:
-            print('The parameters are invalid')
+            print("The parameters are invalid")
         except Exception as e:
-            print('API call failed: {}'.format(e))
+            print("API call failed: {}".format(e))
+
+
 # [END monitoring_alert_delete_channel]
@@ -111,25 +130,32 @@ def delete_notification_channels(project_name, channel_ids, force=None):
 def backup(project_name, backup_filename):
     alert_client = monitoring_v3.AlertPolicyServiceClient()
     channel_client = monitoring_v3.NotificationChannelServiceClient()
-    record = {'project_name': project_name,
-              'policies': list(alert_client.list_alert_policies(project_name)),
-              'channels': list(channel_client.list_notification_channels(
-                  project_name))}
-    json.dump(record, open(backup_filename, 'wt'), cls=ProtoEncoder, indent=2)
-    print('Backed up alert policies and notification channels to {}.'.format(
-        backup_filename)
+    record = {
+        "project_name": project_name,
+        "policies": list(alert_client.list_alert_policies(project_name)),
+        "channels": list(channel_client.list_notification_channels(project_name)),
+    }
+    json.dump(record, open(backup_filename, "wt"), cls=ProtoEncoder, indent=2)
+    print(
+        "Backed up alert policies and notification channels to {}.".format(
+            backup_filename
+        )
     )
 
 
 class ProtoEncoder(json.JSONEncoder):
     """Uses google.protobuf.json_format to encode protobufs as json."""
+
     def default(self, obj):
-        if type(obj) in (monitoring_v3.types.alert_pb2.AlertPolicy,
-                         monitoring_v3.types.notification_pb2.
-                         NotificationChannel):
+        if type(obj) in (
+            monitoring_v3.types.alert_pb2.AlertPolicy,
+            monitoring_v3.types.notification_pb2.NotificationChannel,
+        ):
             text = google.protobuf.json_format.MessageToJson(obj)
             return json.loads(text)
         return super(ProtoEncoder, self).default(obj)
+
+
 # [END monitoring_alert_backup_policies]
@@ -139,21 +165,29 @@ def default(self, obj):
 # [START monitoring_alert_update_channel]
 # [START monitoring_alert_enable_channel]
 def restore(project_name, backup_filename):
-    print('Loading alert policies and notification channels from {}.'.format(
-        backup_filename)
+    print(
+        "Loading alert policies and notification channels from {}.".format(
+            backup_filename
+        )
     )
-    record = json.load(open(backup_filename, 'rt'))
-    is_same_project = project_name == record['project_name']
+    record = json.load(open(backup_filename, "rt"))
+    is_same_project = project_name == record["project_name"]
     # Convert dicts to AlertPolicies.
-    policies_json = [json.dumps(policy) for policy in record['policies']]
-    policies = [google.protobuf.json_format.Parse(
-        policy_json, monitoring_v3.types.alert_pb2.AlertPolicy())
-        for policy_json in policies_json]
+    policies_json = [json.dumps(policy) for policy in record["policies"]]
+    policies = [
+        google.protobuf.json_format.Parse(
+            policy_json, monitoring_v3.types.alert_pb2.AlertPolicy()
+        )
+        for policy_json in policies_json
+    ]
     # Convert dicts to NotificationChannels
-    channels_json = [json.dumps(channel) for channel in record['channels']]
-    channels = [google.protobuf.json_format.Parse(
-        channel_json, monitoring_v3.types.notification_pb2.
-        NotificationChannel()) for channel_json in channels_json]
+    channels_json = [json.dumps(channel) for channel in record["channels"]]
+    channels = [
+        google.protobuf.json_format.Parse(
+            channel_json, monitoring_v3.types.notification_pb2.NotificationChannel()
+        )
+        for channel_json in channels_json
+    ]
 
     # Restore the channels.
     channel_client = monitoring_v3.NotificationChannelServiceClient()
@@ -161,12 +195,13 @@ def restore(project_name, backup_filename):
     for channel in channels:
         updated = False
-        print('Updating channel', channel.display_name)
+        print("Updating channel", channel.display_name)
 
         # This field is immutable and it is illegal to specify a
         # non-default value (UNVERIFIED or VERIFIED) in the
         # Create() or Update() operations.
-        channel.verification_status = monitoring_v3.enums.NotificationChannel.\
-            VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED
+        channel.verification_status = (
+            monitoring_v3.enums.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED
+        )
 
         if is_same_project:
             try:
@@ -180,17 +215,18 @@ def restore(project_name, backup_filename):
             old_name = channel.name
             channel.ClearField("name")
             new_channel = channel_client.create_notification_channel(
-                project_name, channel)
+                project_name, channel
+            )
             channel_name_map[old_name] = new_channel.name
 
     # Restore the alerts
     alert_client = monitoring_v3.AlertPolicyServiceClient()
     for policy in policies:
-        print('Updating policy', policy.display_name)
+        print("Updating policy", policy.display_name)
 
         # These two fields cannot be set directly, so clear them.
-        policy.ClearField('creation_record')
-        policy.ClearField('mutation_record')
+        policy.ClearField("creation_record")
+        policy.ClearField("mutation_record")
 
         # Update old channel names with new channel names.
         for i, channel in enumerate(policy.notification_channels):
@@ -218,7 +254,9 @@ def restore(project_name, backup_filename):
         for condition in policy.conditions:
             condition.ClearField("name")
         policy = alert_client.create_alert_policy(project_name, policy)
-        print('Updated', policy.name)
+        print("Updated", policy.name)
+
+
 # [END monitoring_alert_enable_channel]
 # [END monitoring_alert_restore_policies]
 # [END monitoring_alert_create_policy]
@@ -239,105 +277,83 @@ def project_id():
     Returns:
         str -- the project name
     """
-    project_id = os.environ['GCLOUD_PROJECT']
+    project_id = os.environ["GCLOUD_PROJECT"]
 
     if not project_id:
         raise MissingProjectIdError(
-            'Set the environment variable ' +
-            'GCLOUD_PROJECT to your Google Cloud Project Id.')
+            "Set the environment variable "
+            + "GCLOUD_PROJECT to your Google Cloud Project Id."
+        )
     return project_id
 
 
 def project_name():
-    return 'projects/' + project_id()
+    return "projects/" + project_id()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description='Demonstrates AlertPolicy API operations.')
+        description="Demonstrates AlertPolicy API operations."
+    )
 
-    subparsers = parser.add_subparsers(dest='command')
+    subparsers = parser.add_subparsers(dest="command")
 
     list_alert_policies_parser = subparsers.add_parser(
-        'list-alert-policies',
-        help=list_alert_policies.__doc__
+        "list-alert-policies", help=list_alert_policies.__doc__
     )
 
     list_notification_channels_parser = subparsers.add_parser(
-        'list-notification-channels',
-        help=list_alert_policies.__doc__
+        "list-notification-channels", help=list_alert_policies.__doc__
     )
 
     enable_alert_policies_parser = subparsers.add_parser(
-        'enable-alert-policies',
-        help=enable_alert_policies.__doc__
-    )
-    enable_alert_policies_parser.add_argument(
-        '--filter',
+        "enable-alert-policies", help=enable_alert_policies.__doc__
     )
+    enable_alert_policies_parser.add_argument("--filter",)
 
     disable_alert_policies_parser = subparsers.add_parser(
-        'disable-alert-policies',
-        help=enable_alert_policies.__doc__
-    )
-    disable_alert_policies_parser.add_argument(
-        '--filter',
+        "disable-alert-policies", help=enable_alert_policies.__doc__
    )
+    disable_alert_policies_parser.add_argument("--filter",)
 
     replace_notification_channels_parser = subparsers.add_parser(
-        'replace-notification-channels',
-        help=replace_notification_channels.__doc__
+        "replace-notification-channels", help=replace_notification_channels.__doc__
     )
     replace_notification_channels_parser.add_argument(
-        '-p', '--alert_policy_id',
-        required=True
+        "-p", "--alert_policy_id", required=True
     )
     replace_notification_channels_parser.add_argument(
-        '-c', '--notification_channel_id',
-        required=True,
-        action='append'
+        "-c", "--notification_channel_id", required=True, action="append"
     )
 
-    backup_parser = subparsers.add_parser(
-        'backup',
-        help=backup.__doc__
-    )
-    backup_parser.add_argument(
-        '--backup_to_filename',
-        required=True
-    )
+    backup_parser = subparsers.add_parser("backup", help=backup.__doc__)
+    backup_parser.add_argument("--backup_to_filename", required=True)
 
-    restore_parser = subparsers.add_parser(
-        'restore',
-        help=restore.__doc__
-    )
-    restore_parser.add_argument(
-        '--restore_from_filename',
-        required=True
-    )
+    restore_parser = subparsers.add_parser("restore", help=restore.__doc__)
+    restore_parser.add_argument("--restore_from_filename", required=True)
 
     args = parser.parse_args()
 
-    if args.command == 'list-alert-policies':
+    if args.command == "list-alert-policies":
         list_alert_policies(project_name())
 
-    elif args.command == 'list-notification-channels':
+    elif args.command == "list-notification-channels":
         list_notification_channels(project_name())
 
-    elif args.command == 'enable-alert-policies':
+    elif args.command == "enable-alert-policies":
         enable_alert_policies(project_name(), enable=True, filter_=args.filter)
 
-    elif args.command == 'disable-alert-policies':
-        enable_alert_policies(project_name(), enable=False,
-                              filter_=args.filter)
+    elif args.command == "disable-alert-policies":
+        enable_alert_policies(project_name(), enable=False, filter_=args.filter)
 
-    elif args.command == 'replace-notification-channels':
-        replace_notification_channels(project_name(), args.alert_policy_id,
-                                      args.notification_channel_id)
+    elif args.command == "replace-notification-channels":
+        replace_notification_channels(
+            project_name(), args.alert_policy_id, args.notification_channel_id
+        )
 
-    elif args.command == 'backup':
+    elif args.command == "backup":
         backup(project_name(), args.backup_to_filename)
 
-    elif args.command == 'restore':
+    elif args.command == "restore":
         restore(project_name(), args.restore_from_filename)
diff --git a/monitoring/api/v3/alerts-client/snippets_test.py b/monitoring/api/v3/alerts-client/snippets_test.py
index bd0cf401192..a11ea01a4ef 100644
--- a/monitoring/api/v3/alerts-client/snippets_test.py
+++ b/monitoring/api/v3/alerts-client/snippets_test.py
@@ -25,8 +25,7 @@
 
 
 def random_name(length):
-    return ''.join(
-        [random.choice(string.ascii_lowercase) for i in range(length)])
+    return "".join([random.choice(string.ascii_lowercase) for i in range(length)])
 
 
 class PochanFixture:
@@ -39,25 +38,28 @@ def __init__(self):
         self.project_name = snippets.project_name()
         self.alert_policy_client = monitoring_v3.AlertPolicyServiceClient()
         self.notification_channel_client = (
-            monitoring_v3.NotificationChannelServiceClient())
+            monitoring_v3.NotificationChannelServiceClient()
+        )
 
     def __enter__(self):
         # Create a policy.
         policy = monitoring_v3.types.alert_pb2.AlertPolicy()
-        json = open('test_alert_policy.json').read()
+        json = open("test_alert_policy.json").read()
         google.protobuf.json_format.Parse(json, policy)
-        policy.display_name = 'snippets-test-' + random_name(10)
+        policy.display_name = "snippets-test-" + random_name(10)
         self.alert_policy = self.alert_policy_client.create_alert_policy(
-            self.project_name, policy)
+            self.project_name, policy
+        )
 
         # Create a notification channel.
         notification_channel = (
-            monitoring_v3.types.notification_pb2.NotificationChannel())
-        json = open('test_notification_channel.json').read()
+            monitoring_v3.types.notification_pb2.NotificationChannel()
+        )
+        json = open("test_notification_channel.json").read()
         google.protobuf.json_format.Parse(json, notification_channel)
-        notification_channel.display_name = 'snippets-test-' + random_name(10)
-        self.notification_channel = (
-            self.notification_channel_client.create_notification_channel(
-                self.project_name, notification_channel))
+        notification_channel.display_name = "snippets-test-" + random_name(10)
+        self.notification_channel = self.notification_channel_client.create_notification_channel(
+            self.project_name, notification_channel
+        )
         return self
 
@@ -65,10 +67,11 @@ def __exit__(self, type, value, traceback):
         self.alert_policy_client.delete_alert_policy(self.alert_policy.name)
         if self.notification_channel.name:
             self.notification_channel_client.delete_notification_channel(
-                self.notification_channel.name)
+                self.notification_channel.name
+            )
 
 
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
 def pochan():
     with PochanFixture() as pochan:
         yield pochan
@@ -98,29 +101,32 @@ def test_enable_alert_policies(capsys, pochan):
 
 
 def test_replace_channels(capsys, pochan):
-    alert_policy_id = pochan.alert_policy.name.split('/')[-1]
-    notification_channel_id = pochan.notification_channel.name.split('/')[-1]
+    alert_policy_id = pochan.alert_policy.name.split("/")[-1]
+    notification_channel_id = pochan.notification_channel.name.split("/")[-1]
     snippets.replace_notification_channels(
-        pochan.project_name, alert_policy_id, [notification_channel_id])
+        pochan.project_name, alert_policy_id, [notification_channel_id]
+    )
     out, _ = capsys.readouterr()
     assert "Updated {0}".format(pochan.alert_policy.name) in out
 
 
 def test_backup_and_restore(capsys, pochan):
-    snippets.backup(pochan.project_name, 'backup.json')
+    snippets.backup(pochan.project_name, "backup.json")
     out, _ = capsys.readouterr()
 
-    snippets.restore(pochan.project_name, 'backup.json')
+    snippets.restore(pochan.project_name, "backup.json")
     out, _ = capsys.readouterr()
     assert "Updated {0}".format(pochan.alert_policy.name) in out
-    assert "Updating channel {0}".format(
-        pochan.notification_channel.display_name) in out
+    assert (
+        "Updating channel {0}".format(pochan.notification_channel.display_name) in out
+    )
 
 
 def test_delete_channels(capsys, pochan):
-    notification_channel_id = pochan.notification_channel.name.split('/')[-1]
+    notification_channel_id = pochan.notification_channel.name.split("/")[-1]
     snippets.delete_notification_channels(
-        pochan.project_name, [notification_channel_id], force=True)
+        pochan.project_name, [notification_channel_id], force=True
+    )
     out, _ = capsys.readouterr()
     assert "{0} deleted".format(notification_channel_id) in out
-    pochan.notification_channel.name = ''   # So teardown is not tried
+    pochan.notification_channel.name = ""  # So teardown is not tried
diff --git a/monitoring/api/v3/api-client/custom_metric.py b/monitoring/api/v3/api-client/custom_metric.py
index 6cf83980357..7a23bc0e72f 100644
--- a/monitoring/api/v3/api-client/custom_metric.py
+++ b/monitoring/api/v3/api-client/custom_metric.py
@@ -55,8 +55,7 @@ def get_now_rfc3339():
     return format_rfc3339(datetime.datetime.utcnow())
 
 
-def create_custom_metric(client, project_id,
-                         custom_metric_type, metric_kind):
+def create_custom_metric(client, project_id, custom_metric_type, metric_kind):
     """Create custom metric descriptor"""
     metrics_descriptor = {
         "type": custom_metric_type,
@@ -64,37 +63,44 @@ def create_custom_metric(client, project_id,
             {
                 "key": "environment",
                 "valueType": "STRING",
-                "description": "An arbitrary measurement"
+                "description": "An arbitrary measurement",
             }
         ],
         "metricKind": metric_kind,
         "valueType": "INT64",
         "unit": "items",
         "description": "An arbitrary measurement.",
-        "displayName": "Custom Metric"
+        "displayName": "Custom Metric",
     }
 
-    return client.projects().metricDescriptors().create(
-        name=project_id, body=metrics_descriptor).execute()
+    return (
+        client.projects()
+        .metricDescriptors()
+        .create(name=project_id, body=metrics_descriptor)
+        .execute()
+    )
 
 
-def delete_metric_descriptor(
-        client, custom_metric_name):
+def delete_metric_descriptor(client, custom_metric_name):
     """Delete a custom metric descriptor."""
-    client.projects().metricDescriptors().delete(
-        name=custom_metric_name).execute()
+    client.projects().metricDescriptors().delete(name=custom_metric_name).execute()
 
 
 def get_custom_metric(client, project_id, custom_metric_type):
     """Retrieve the custom metric we created"""
-    request = client.projects().metricDescriptors().list(
-        name=project_id,
-        filter='metric.type=starts_with("{}")'.format(custom_metric_type))
+    request = (
+        client.projects()
+        .metricDescriptors()
+        .list(
+            name=project_id,
+            filter='metric.type=starts_with("{}")'.format(custom_metric_type),
+        )
+    )
     response = request.execute()
-    print('ListCustomMetrics response:')
+    print("ListCustomMetrics response:")
     pprint.pprint(response)
     try:
-        return response['metricDescriptors']
+        return response["metricDescriptors"]
     except KeyError:
         return None
@@ -108,42 +114,35 @@ def get_custom_data_point():
 
 
 # [START write_timeseries]
-def write_timeseries_value(client, project_resource,
-                           custom_metric_type, instance_id, metric_kind):
+def write_timeseries_value(
+    client, project_resource, custom_metric_type, instance_id, metric_kind
+):
     """Write the custom metric obtained by get_custom_data_point at a point
     in time."""
     # Specify a new data point for the time series.
     now = get_now_rfc3339()
     timeseries_data = {
-        "metric": {
-            "type": custom_metric_type,
-            "labels": {
-                "environment": "STAGING"
-            }
-        },
+        "metric": {"type": custom_metric_type, "labels": {"environment": "STAGING"}},
         "resource": {
-            "type": 'gce_instance',
-            "labels": {
-                'instance_id': instance_id,
-                'zone': 'us-central1-f'
-            }
+            "type": "gce_instance",
+            "labels": {"instance_id": instance_id, "zone": "us-central1-f"},
         },
         "points": [
             {
-                "interval": {
-                    "startTime": now,
-                    "endTime": now
-                },
-                "value": {
-                    "int64Value": get_custom_data_point()
-                }
+                "interval": {"startTime": now, "endTime": now},
+                "value": {"int64Value": get_custom_data_point()},
             }
-        ]
+        ],
     }
 
-    request = client.projects().timeSeries().create(
-        name=project_resource, body={"timeSeries": [timeseries_data]})
+    request = (
+        client.projects()
+        .timeSeries()
+        .create(name=project_resource, body={"timeSeries": [timeseries_data]})
+    )
     request.execute()
+
+
 # [END write_timeseries]
@@ -154,12 +153,17 @@ def read_timeseries(client, project_resource, custom_metric_type):
     from.
     :param custom_metric_name: The name of the timeseries we want to read.
     """
-    request = client.projects().timeSeries().list(
-        name=project_resource,
-        filter='metric.type="{0}"'.format(custom_metric_type),
-        pageSize=3,
-        interval_startTime=get_start_time(),
-        interval_endTime=get_now_rfc3339())
+    request = (
+        client.projects()
+        .timeSeries()
+        .list(
+            name=project_resource,
+            filter='metric.type="{0}"'.format(custom_metric_type),
+            pageSize=3,
+            interval_startTime=get_start_time(),
+            interval_endTime=get_now_rfc3339(),
+        )
+    )
     response = request.execute()
     return response
@@ -173,33 +177,32 @@ def main(project_id):
     METRIC_KIND = "GAUGE"
 
     project_resource = "projects/{0}".format(project_id)
-    client = googleapiclient.discovery.build('monitoring', 'v3')
-    create_custom_metric(client, project_resource,
-                         CUSTOM_METRIC_TYPE, METRIC_KIND)
+    client = googleapiclient.discovery.build("monitoring", "v3")
+    create_custom_metric(client, project_resource, CUSTOM_METRIC_TYPE, METRIC_KIND)
     custom_metric = None
     while not custom_metric:
         # wait until it's created
         time.sleep(1)
-        custom_metric = get_custom_metric(
-            client, project_resource, CUSTOM_METRIC_TYPE)
+        custom_metric = get_custom_metric(client, project_resource, CUSTOM_METRIC_TYPE)
 
-    write_timeseries_value(client, project_resource,
-                           CUSTOM_METRIC_TYPE, INSTANCE_ID, METRIC_KIND)
+    write_timeseries_value(
+        client, project_resource, CUSTOM_METRIC_TYPE, INSTANCE_ID, METRIC_KIND
+    )
     # Sometimes on new metric descriptors, writes have a delay in being read
     # back. 3 seconds should be enough to make sure our read call picks up the
     # write
     time.sleep(3)
     timeseries = read_timeseries(client, project_resource, CUSTOM_METRIC_TYPE)
-    print('read_timeseries response:\n{}'.format(pprint.pformat(timeseries)))
+    print("read_timeseries response:\n{}".format(pprint.pformat(timeseries)))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description=__doc__,
-        formatter_class=argparse.RawDescriptionHelpFormatter
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
     )
     parser.add_argument(
-        '--project_id', help='Project ID you want to access.', required=True)
+        "--project_id", help="Project ID you want to access.", required=True
+    )
     args = parser.parse_args()
     main(args.project_id)
diff --git a/monitoring/api/v3/api-client/custom_metric_test.py b/monitoring/api/v3/api-client/custom_metric_test.py
index e2b0d8f366c..1145b113f97 100644
--- a/monitoring/api/v3/api-client/custom_metric_test.py
+++ b/monitoring/api/v3/api-client/custom_metric_test.py
@@ -35,21 +35,19 @@
 from custom_metric import read_timeseries
 from custom_metric import write_timeseries_value
 
-PROJECT = os.environ['GCLOUD_PROJECT']
+PROJECT = os.environ["GCLOUD_PROJECT"]
 
 """ Custom metric domain for all custom metrics"""
 CUSTOM_METRIC_DOMAIN = "custom.googleapis.com"
 
-METRIC = 'compute.googleapis.com/instance/cpu/usage_time'
-METRIC_NAME = ''.join(
-    random.choice('0123456789ABCDEF') for i in range(16))
-METRIC_RESOURCE = "{}/{}".format(
-    CUSTOM_METRIC_DOMAIN, METRIC_NAME)
+METRIC = "compute.googleapis.com/instance/cpu/usage_time"
+METRIC_NAME = "".join(random.choice("0123456789ABCDEF") for i in range(16))
+METRIC_RESOURCE = "{}/{}".format(CUSTOM_METRIC_DOMAIN, METRIC_NAME)
 
 
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
 def client():
-    return googleapiclient.discovery.build('monitoring', 'v3')
+    return googleapiclient.discovery.build("monitoring", "v3")
 
 
 @flaky
@@ -65,28 +63,27 @@ def test_custom_metric(client):
     METRIC_KIND = "GAUGE"
 
     custom_metric_descriptor = create_custom_metric(
-        client, PROJECT_RESOURCE, METRIC_RESOURCE, METRIC_KIND)
+        client, PROJECT_RESOURCE, METRIC_RESOURCE, METRIC_KIND
+    )
 
     # wait until metric has been created, use the get call to wait until
     # a response comes back with the new metric
     custom_metric = None
     while not custom_metric:
         time.sleep(1)
-        custom_metric = get_custom_metric(
-            client, PROJECT_RESOURCE, METRIC_RESOURCE)
+        custom_metric = get_custom_metric(client, PROJECT_RESOURCE, METRIC_RESOURCE)
 
-    write_timeseries_value(client, PROJECT_RESOURCE,
-                           METRIC_RESOURCE, INSTANCE_ID,
-                           METRIC_KIND)
+    write_timeseries_value(
+        client, PROJECT_RESOURCE, METRIC_RESOURCE, INSTANCE_ID, METRIC_KIND
+    )
     # Sometimes on new metric descriptors, writes have a delay in being
     # read back. Use eventually_consistent to account for this.
     @eventually_consistent.call
     def _():
         response = read_timeseries(client, PROJECT_RESOURCE, METRIC_RESOURCE)
-        value = int(
-            response['timeSeries'][0]['points'][0]['value']['int64Value'])
+        value = int(response["timeSeries"][0]["points"][0]["value"]["int64Value"])
         # using seed of 1 will create a value of 1
         assert value == pseudo_random_value
 
-    delete_metric_descriptor(client, custom_metric_descriptor['name'])
+    delete_metric_descriptor(client, custom_metric_descriptor["name"])
diff --git a/monitoring/api/v3/api-client/list_resources.py b/monitoring/api/v3/api-client/list_resources.py
index 0c4e27a8259..af55eba860c 100644
--- a/monitoring/api/v3/api-client/list_resources.py
+++ b/monitoring/api/v3/api-client/list_resources.py
@@ -42,8 +42,7 @@ def get_start_time():
     arbitrarily to be an hour ago and 5 minutes
     """
     # Return an hour ago - 5 minutes
-    start_time = (datetime.datetime.utcnow() -
-                  datetime.timedelta(hours=1, minutes=5))
+    start_time = datetime.datetime.utcnow() - datetime.timedelta(hours=1, minutes=5)
     return format_rfc3339(start_time)
 
 
@@ -61,58 +60,67 @@ def list_monitored_resource_descriptors(client, project_resource):
     """Query the projects.monitoredResourceDescriptors.list API method.
     This lists all the resources available to be monitored in the API.
     """
-    request = client.projects().monitoredResourceDescriptors().list(
-        name=project_resource)
+    request = (
+        client.projects().monitoredResourceDescriptors().list(name=project_resource)
+    )
     response = request.execute()
-    print('list_monitored_resource_descriptors response:\n{}'.format(
-        pprint.pformat(response)))
+    print(
+        "list_monitored_resource_descriptors response:\n{}".format(
+            pprint.pformat(response)
+        )
+    )
 
 
 def list_metric_descriptors(client, project_resource, metric):
     """Query to MetricDescriptors.list
     This lists the metric specified by METRIC.
     """
-    request = client.projects().metricDescriptors().list(
-        name=project_resource,
-        filter='metric.type="{}"'.format(metric))
+    request = (
+        client.projects()
+        .metricDescriptors()
+        .list(name=project_resource, filter='metric.type="{}"'.format(metric))
+    )
     response = request.execute()
-    print(
-        'list_metric_descriptors response:\n{}'.format(
-            pprint.pformat(response)))
+    print("list_metric_descriptors response:\n{}".format(pprint.pformat(response)))
 
 
 def list_timeseries(client, project_resource, metric):
     """Query the TimeSeries.list API method.
     This lists all the timeseries created between START_TIME and END_TIME.
     """
-    request = client.projects().timeSeries().list(
-        name=project_resource,
-        filter='metric.type="{}"'.format(metric),
-        pageSize=3,
-        interval_startTime=get_start_time(),
-        interval_endTime=get_end_time())
+    request = (
+        client.projects()
+        .timeSeries()
+        .list(
+            name=project_resource,
+            filter='metric.type="{}"'.format(metric),
+            pageSize=3,
+            interval_startTime=get_start_time(),
+            interval_endTime=get_end_time(),
+        )
+    )
     response = request.execute()
-    print('list_timeseries response:\n{}'.format(pprint.pformat(response)))
+    print("list_timeseries response:\n{}".format(pprint.pformat(response)))
 
 
 def main(project_id):
-    client = googleapiclient.discovery.build('monitoring', 'v3')
+    client = googleapiclient.discovery.build("monitoring", "v3")
 
     project_resource = "projects/{}".format(project_id)
     list_monitored_resource_descriptors(client, project_resource)
     # Metric to list
-    metric = 'compute.googleapis.com/instance/cpu/usage_time'
+    metric = "compute.googleapis.com/instance/cpu/usage_time"
     list_metric_descriptors(client, project_resource, metric)
     list_timeseries(client, project_resource, metric)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description=__doc__,
-        formatter_class=argparse.RawDescriptionHelpFormatter
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
     )
     parser.add_argument(
-        '--project_id', help='Project ID you want to access.', required=True)
+        "--project_id", help="Project ID you want to access.", required=True
+    )
     args = parser.parse_args()
     main(args.project_id)
diff --git a/monitoring/api/v3/api-client/list_resources_test.py b/monitoring/api/v3/api-client/list_resources_test.py
index f2a9bbf035d..897fec3bf1a 100644
--- a/monitoring/api/v3/api-client/list_resources_test.py
+++ b/monitoring/api/v3/api-client/list_resources_test.py
@@ -29,42 +29,37 @@
 
 import list_resources
 
-PROJECT = os.environ['GCLOUD_PROJECT']
-METRIC = 'compute.googleapis.com/instance/cpu/usage_time'
+PROJECT = os.environ["GCLOUD_PROJECT"]
+METRIC = "compute.googleapis.com/instance/cpu/usage_time"
 
 
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
 def client():
-    return googleapiclient.discovery.build('monitoring', 'v3')
+    return googleapiclient.discovery.build("monitoring", "v3")
 
 
 @flaky
 def test_list_monitored_resources(client, capsys):
     PROJECT_RESOURCE = "projects/{}".format(PROJECT)
-    list_resources.list_monitored_resource_descriptors(
-        client, PROJECT_RESOURCE)
+    list_resources.list_monitored_resource_descriptors(client, PROJECT_RESOURCE)
     stdout, _ = capsys.readouterr()
-    regex = re.compile(
-        'An application running', re.I)
+    regex = re.compile("An application running", re.I)
     assert regex.search(stdout) is not None
 
 
 @flaky
 def test_list_metrics(client, capsys):
     PROJECT_RESOURCE = "projects/{}".format(PROJECT)
-    list_resources.list_metric_descriptors(
-        client, PROJECT_RESOURCE, METRIC)
+    list_resources.list_metric_descriptors(client, PROJECT_RESOURCE, METRIC)
     stdout, _ = capsys.readouterr()
-    regex = re.compile(
-        u'Delta CPU', re.I)
+    regex = re.compile(u"Delta CPU", re.I)
     assert regex.search(stdout) is not None
 
 
 @flaky
 def test_list_timeseries(client, capsys):
     PROJECT_RESOURCE = "projects/{}".format(PROJECT)
-    list_resources.list_timeseries(
-        client, PROJECT_RESOURCE, METRIC)
+    list_resources.list_timeseries(client, PROJECT_RESOURCE, METRIC)
     stdout, _ = capsys.readouterr()
-    regex = re.compile(u'list_timeseries response:\n', re.I)
+    regex = re.compile(u"list_timeseries response:\n", re.I)
     assert regex.search(stdout) is not None
diff --git a/monitoring/api/v3/cloud-client/quickstart.py b/monitoring/api/v3/cloud-client/quickstart.py
index 0527acae545..d8476ee366a 100644
--- a/monitoring/api/v3/cloud-client/quickstart.py
+++ b/monitoring/api/v3/cloud-client/quickstart.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import uuid
+
 
 def run_quickstart():
     # [START monitoring_quickstart]
@@ -20,24 +22,27 @@ def run_quickstart():
     import time
 
     client = monitoring_v3.MetricServiceClient()
-    project = 'my-project'  # TODO: Update to your project ID.
+    project = "my-project"  # TODO: Update to your project ID.
     project_name = client.project_path(project)
 
     series = monitoring_v3.types.TimeSeries()
-    series.metric.type = 'custom.googleapis.com/my_metric'
-    series.resource.type = 'gce_instance'
-    series.resource.labels['instance_id'] = '1234567890123456789'
-    series.resource.labels['zone'] = 'us-central1-f'
+    series.metric.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4())
+    series.resource.type = "gce_instance"
+    series.resource.labels["instance_id"] = "1234567890123456789"
+    series.resource.labels["zone"] = "us-central1-f"
     point = series.points.add()
     point.value.double_value = 3.14
     now = time.time()
     point.interval.end_time.seconds = int(now)
     point.interval.end_time.nanos = int(
-        (now - point.interval.end_time.seconds) * 10**9)
+        (now - point.interval.end_time.seconds) * 10 ** 9
+    )
     client.create_time_series(project_name, [series])
-    print('Successfully wrote time series.')
+    print("Successfully wrote time series.")
     # [END monitoring_quickstart]
+    # debug statement used for test cleanup
+    print("Metric to clean up {}.".format(series.metric.type))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     run_quickstart()
diff --git a/monitoring/api/v3/cloud-client/quickstart_test.py b/monitoring/api/v3/cloud-client/quickstart_test.py
index d9b54b62c48..e551f9d2797 100644
--- a/monitoring/api/v3/cloud-client/quickstart_test.py
+++ b/monitoring/api/v3/cloud-client/quickstart_test.py
@@ -13,29 +13,36 @@
 # limitations under the License.
 
 import os
+import re
 
 import mock
 import pytest
 
 import quickstart
+import snippets
 
-PROJECT = os.environ['GCLOUD_PROJECT']
+PROJECT = os.environ["GCLOUD_PROJECT"]
 
 
 @pytest.fixture
 def mock_project_path():
     """Mock out project and replace with project from environment."""
     project_patch = mock.patch(
-        'google.cloud.monitoring_v3.MetricServiceClient.'
-        'project_path')
+        "google.cloud.monitoring_v3.MetricServiceClient." "project_path"
+    )
 
     with project_patch as project_mock:
-        project_mock.return_value = 'projects/{}'.format(PROJECT)
+        project_mock.return_value = "projects/{}".format(PROJECT)
         yield project_mock
 
 
 def test_quickstart(capsys, mock_project_path):
     quickstart.run_quickstart()
     out, _ = capsys.readouterr()
-    assert 'wrote' in out
+    assert "wrote" in out
+
+    # clean up custom metric created as part of quickstart
+    match = re.search(r"Metric to clean up (.*)\.", out)
+    metric_name = "projects/{}/metricDescriptors/{}".format(PROJECT, match.group(1))
+    snippets.delete_metric_descriptor(metric_name)
diff --git a/monitoring/api/v3/cloud-client/snippets.py b/monitoring/api/v3/cloud-client/snippets.py
index a0abb654e0c..f4879f395e8 100644
--- a/monitoring/api/v3/cloud-client/snippets.py
+++ b/monitoring/api/v3/cloud-client/snippets.py
@@ -15,29 +15,23 @@
 import argparse
 import os
 import pprint
-import random
 import time
+import uuid
 
 from google.cloud import monitoring_v3
 
 
-# Avoid collisions with other runs
-RANDOM_SUFFIX = str(random.randint(1000, 9999))
-
-
 def create_metric_descriptor(project_id):
     # [START monitoring_create_metric]
     client = monitoring_v3.MetricServiceClient()
     project_name = client.project_path(project_id)
     descriptor = monitoring_v3.types.MetricDescriptor()
-    descriptor.type = 'custom.googleapis.com/my_metric' + RANDOM_SUFFIX
-    descriptor.metric_kind = (
-        monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)
-    descriptor.value_type = (
-        monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)
-    descriptor.description = 'This is a simple example of a custom metric.'
+    descriptor.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4())
+    descriptor.metric_kind = monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE
+    descriptor.value_type = monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE
+    descriptor.description = "This is a simple example of a custom metric."
     descriptor = client.create_metric_descriptor(project_name, descriptor)
-    print('Created {}.'.format(descriptor.name))
+    print("Created {}.".format(descriptor.name))
     # [END monitoring_create_metric]
@@ -45,7 +39,7 @@ def delete_metric_descriptor(descriptor_name):
     # [START monitoring_delete_metric]
     client = monitoring_v3.MetricServiceClient()
     client.delete_metric_descriptor(descriptor_name)
-    print('Deleted metric descriptor {}.'.format(descriptor_name))
+    print("Deleted metric descriptor {}.".format(descriptor_name))
     # [END monitoring_delete_metric]
@@ -55,18 +49,21 @@ def write_time_series(project_id):
     project_name = client.project_path(project_id)
 
     series = monitoring_v3.types.TimeSeries()
-    series.metric.type = 'custom.googleapis.com/my_metric' + RANDOM_SUFFIX
-    series.resource.type = 'gce_instance'
-    series.resource.labels['instance_id'] = '1234567890123456789'
-    series.resource.labels['zone'] = 'us-central1-f'
+    series.metric.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4())
+    series.resource.type = "gce_instance"
+    series.resource.labels["instance_id"] = "1234567890123456789"
+    series.resource.labels["zone"] = "us-central1-f"
     point = series.points.add()
     point.value.double_value = 3.14
     now = time.time()
     point.interval.end_time.seconds = int(now)
     point.interval.end_time.nanos = int(
-        (now - point.interval.end_time.seconds) * 10**9)
+        (now - point.interval.end_time.seconds) * 10 ** 9
+    )
     client.create_time_series(project_name, [series])
     # [END monitoring_write_timeseries]
+    # debug statement used for test cleanup
+    print("Metric to clean up {}.".format(series.metric.type))
@@ -76,15 +73,15 @@ def list_time_series(project_id):
     interval = monitoring_v3.types.TimeInterval()
     now = time.time()
     interval.end_time.seconds = int(now)
-    interval.end_time.nanos = int(
-        (now - interval.end_time.seconds) * 10**9)
+    interval.end_time.nanos = int((now - interval.end_time.seconds) * 10 ** 9)
     interval.start_time.seconds = int(now - 1200)
     interval.start_time.nanos = interval.end_time.nanos
     results = client.list_time_series(
         project_name,
         'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
         interval,
-        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL)
+        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
+    )
     for result in results:
         print(result)
     # [END monitoring_read_timeseries_simple]
@@ -97,15 +94,15 @@ def list_time_series_header(project_id):
     interval = monitoring_v3.types.TimeInterval()
     now = time.time()
     interval.end_time.seconds = int(now)
-    interval.end_time.nanos = int(
-        (now - interval.end_time.seconds) * 10**9)
+    interval.end_time.nanos = int((now - interval.end_time.seconds) * 10 ** 9)
     interval.start_time.seconds = int(now - 1200)
     interval.start_time.nanos = interval.end_time.nanos
     results = client.list_time_series(
         project_name,
         'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
         interval,
-        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS)
+        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS,
+    )
     for result in results:
         print(result)
     # [END monitoring_read_timeseries_fields]
@@ -118,21 +115,20 @@ def list_time_series_aggregate(project_id):
     interval = monitoring_v3.types.TimeInterval()
     now = time.time()
     interval.end_time.seconds = int(now)
-    interval.end_time.nanos = int(
-        (now - interval.end_time.seconds) * 10**9)
+    interval.end_time.nanos = int((now - interval.end_time.seconds) * 10 ** 9)
     interval.start_time.seconds = int(now - 3600)
     interval.start_time.nanos = interval.end_time.nanos
     aggregation = monitoring_v3.types.Aggregation()
     aggregation.alignment_period.seconds = 1200  # 20 minutes
-    aggregation.per_series_aligner = (
-        monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN)
+    aggregation.per_series_aligner = monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN
 
     results = client.list_time_series(
         project_name,
         'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
         interval,
         monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
-        aggregation)
+        aggregation,
+    )
     for result in results:
         print(result)
     # [END monitoring_read_timeseries_align]
@@ -145,24 +141,24 @@ def list_time_series_reduce(project_id):
     interval = monitoring_v3.types.TimeInterval()
     now = time.time()
     interval.end_time.seconds = int(now)
-    interval.end_time.nanos = int(
-        (now - interval.end_time.seconds) * 10**9)
+    interval.end_time.nanos = int((now - interval.end_time.seconds) * 10 ** 9)
     interval.start_time.seconds = int(now - 3600)
     interval.start_time.nanos = interval.end_time.nanos
     aggregation = monitoring_v3.types.Aggregation()
     aggregation.alignment_period.seconds = 1200  # 20 minutes
-    aggregation.per_series_aligner = (
-        monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN)
+    aggregation.per_series_aligner = monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN
     aggregation.cross_series_reducer = (
-        monitoring_v3.enums.Aggregation.Reducer.REDUCE_MEAN)
-    aggregation.group_by_fields.append('resource.zone')
+        monitoring_v3.enums.Aggregation.Reducer.REDUCE_MEAN
+    )
+    aggregation.group_by_fields.append("resource.zone")
 
     results = client.list_time_series(
         project_name,
         'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
         interval,
         monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
-        aggregation)
+        aggregation,
+    )
     for result in results:
         print(result)
     # [END monitoring_read_timeseries_reduce]
@@ -181,8 +177,7 @@ def list_monitored_resources(project_id):
     # [START monitoring_list_resources]
     client = monitoring_v3.MetricServiceClient()
     project_name = client.project_path(project_id)
-    resource_descriptors = (
-        client.list_monitored_resource_descriptors(project_name))
+    resource_descriptors = client.list_monitored_resource_descriptors(project_name)
     for descriptor in resource_descriptors:
         print(descriptor.type)
     # [END monitoring_list_resources]
@@ -192,7 +187,8 @@ def get_monitored_resource_descriptor(project_id, resource_type_name):
     # [START monitoring_get_resource]
     client = monitoring_v3.MetricServiceClient()
     resource_path = client.monitored_resource_descriptor_path(
-        project_id, resource_type_name)
+        project_id, resource_type_name
+    )
     pprint.pprint(client.get_monitored_resource_descriptor(resource_path))
     # [END monitoring_get_resource]
@@ -218,117 +214,104 @@ def project_id():
     Returns:
         str -- the project name
     """
-    project_id = (os.environ['GOOGLE_CLOUD_PROJECT'] or
-                  os.environ['GCLOUD_PROJECT'])
+    project_id = os.environ["GCLOUD_PROJECT"]
 
     if not project_id:
         raise MissingProjectIdError(
-            'Set the environment variable ' +
-            'GCLOUD_PROJECT to your Google Cloud Project Id.')
+            "Set the environment variable "
+            + "GCLOUD_PROJECT to your Google Cloud Project Id."
+        )
     return project_id
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description='Demonstrates Monitoring API operations.')
+        description="Demonstrates Monitoring API operations."
+    )
 
-    subparsers = parser.add_subparsers(dest='command')
+    subparsers = parser.add_subparsers(dest="command")
 
     create_metric_descriptor_parser = subparsers.add_parser(
-        'create-metric-descriptor',
-        help=create_metric_descriptor.__doc__
+        "create-metric-descriptor", help=create_metric_descriptor.__doc__
     )
 
     list_metric_descriptor_parser = subparsers.add_parser(
-        'list-metric-descriptors',
-        help=list_metric_descriptors.__doc__
+        "list-metric-descriptors", help=list_metric_descriptors.__doc__
     )
 
     get_metric_descriptor_parser = subparsers.add_parser(
-        'get-metric-descriptor',
-        help=get_metric_descriptor.__doc__
+        "get-metric-descriptor", help=get_metric_descriptor.__doc__
     )
 
     get_metric_descriptor_parser.add_argument(
-        '--metric-type-name',
-        help='The metric type of the metric descriptor to see details about.',
-        required=True
+        "--metric-type-name",
+        help="The metric type of the metric descriptor to see details about.",
+        required=True,
     )
 
     delete_metric_descriptor_parser = subparsers.add_parser(
-        'delete-metric-descriptor',
-        help=list_metric_descriptors.__doc__
+        "delete-metric-descriptor", help=list_metric_descriptors.__doc__
     )
 
     delete_metric_descriptor_parser.add_argument(
-        '--metric-descriptor-name',
-        help='Metric descriptor to delete',
-        required=True
+        "--metric-descriptor-name", help="Metric descriptor to delete", required=True
     )
 
     list_resources_parser = subparsers.add_parser(
-        'list-resources',
-        help=list_monitored_resources.__doc__
+        "list-resources", help=list_monitored_resources.__doc__
     )
 
     get_resource_parser = subparsers.add_parser(
-        'get-resource',
-        help=get_monitored_resource_descriptor.__doc__
+        "get-resource", help=get_monitored_resource_descriptor.__doc__
     )
 
     get_resource_parser.add_argument(
-        '--resource-type-name',
-        help='Monitored resource to view more information about.',
-        required=True
+        "--resource-type-name",
+        help="Monitored resource to view more information about.",
+        required=True,
     )
 
     write_time_series_parser = subparsers.add_parser(
-        'write-time-series',
-        help=write_time_series.__doc__
+        "write-time-series", help=write_time_series.__doc__
     )
 
     list_time_series_parser = subparsers.add_parser(
-        'list-time-series',
-        help=list_time_series.__doc__
+        "list-time-series", help=list_time_series.__doc__
     )
 
     list_time_series_header_parser = subparsers.add_parser(
-        'list-time-series-header',
-        help=list_time_series_header.__doc__
+        "list-time-series-header", help=list_time_series_header.__doc__
     )
 
     read_time_series_reduce = subparsers.add_parser(
-        'list-time-series-reduce',
-        help=list_time_series_reduce.__doc__
+        "list-time-series-reduce", help=list_time_series_reduce.__doc__
     )
 
     read_time_series_aggregate = subparsers.add_parser(
-        'list-time-series-aggregate',
-        help=list_time_series_aggregate.__doc__
+        "list-time-series-aggregate", help=list_time_series_aggregate.__doc__
     )
 
     args = parser.parse_args()
 
-    if args.command == 'create-metric-descriptor':
+    if args.command == "create-metric-descriptor":
         create_metric_descriptor(project_id())
-    if args.command == 'list-metric-descriptors':
+    if args.command == "list-metric-descriptors":
         list_metric_descriptors(project_id())
-    if args.command == 'get-metric-descriptor':
+    if args.command == "get-metric-descriptor":
         get_metric_descriptor(args.metric_type_name)
-    if args.command == 'delete-metric-descriptor':
+    if args.command == "delete-metric-descriptor":
         delete_metric_descriptor(args.metric_descriptor_name)
-    if args.command == 'list-resources':
+    if args.command == "list-resources":
         list_monitored_resources(project_id())
-    if args.command == 'get-resource':
-        get_monitored_resource_descriptor(
-            project_id(), args.resource_type_name)
-    if args.command == 'write-time-series':
+    if args.command == "get-resource":
+        get_monitored_resource_descriptor(project_id(), args.resource_type_name)
+    if args.command == "write-time-series":
         write_time_series(project_id())
-    if args.command == 'list-time-series':
+    if args.command == "list-time-series":
         list_time_series(project_id())
-    if args.command == 'list-time-series-header':
+    if args.command == "list-time-series-header":
         list_time_series_header(project_id())
-    if args.command == 'list-time-series-reduce':
+    if args.command == "list-time-series-reduce":
         list_time_series_reduce(project_id())
-    if args.command == 'list-time-series-aggregate':
+    if args.command == "list-time-series-aggregate":
         list_time_series_aggregate(project_id())
diff --git a/monitoring/api/v3/cloud-client/snippets_test.py b/monitoring/api/v3/cloud-client/snippets_test.py
index bff93575df3..976892411e1 100644
--- a/monitoring/api/v3/cloud-client/snippets_test.py
+++ b/monitoring/api/v3/cloud-client/snippets_test.py
@@ -13,70 +13,121 @@
 # limitations under the License.
 
 import re
+import pytest
+import uuid
+import os
 
 from gcp_devrel.testing import eventually_consistent
+import google.api_core.exceptions
 
 import snippets
+from google.cloud import monitoring_v3
 
-def test_create_get_delete_metric_descriptor(capsys):
-    snippets.create_metric_descriptor(snippets.project_id())
+PROJECT_ID = os.environ["GCLOUD_PROJECT"]
+
+
+@pytest.fixture(scope="module")
+def test_custom_metric_descriptor():
+    client = monitoring_v3.MetricServiceClient()
+    project_name = client.project_path(PROJECT_ID)
+    descriptor = monitoring_v3.types.MetricDescriptor()
+    descriptor.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4())
+    descriptor.metric_kind = monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE
+    descriptor.value_type = monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE
+    descriptor.description = "This is a simple example of a custom metric."
+    descriptor_response = client.create_metric_descriptor(project_name, descriptor)
+    yield descriptor_response.name
+    try:
+        snippets.delete_metric_descriptor(descriptor_response.name)
+    except google.api_core.exceptions.NotFound:
+        print("Metric already deleted")
+
+
+def test_create_metric_descriptor(capsys):
+    snippets.create_metric_descriptor(PROJECT_ID)
     out, _ = capsys.readouterr()
-    match = re.search(r'Created (.*)\.', out)
+    assert "Created" in out
+    assert "custom.googleapis.com/my_metric" in out
+    # cleanup from test
+    match = re.search(r"Created (.*)\.", out)
     metric_name = match.group(1)
+    snippets.delete_metric_descriptor(metric_name)
+
+
+def test_get_metric_descriptor(test_custom_metric_descriptor, capsys):
     try:
+
         @eventually_consistent.call
         def __():
-            snippets.get_metric_descriptor(metric_name)
+            snippets.get_metric_descriptor(test_custom_metric_descriptor)
             out, _ = capsys.readouterr()
-            assert 'DOUBLE' in out
-    finally:
-        snippets.delete_metric_descriptor(metric_name)
-        out, _ = capsys.readouterr()
-    assert 'Deleted metric' in out
+            assert test_custom_metric_descriptor in out
+    except Exception as e:
+        print(e)
+
+
+def test_delete_metric_descriptor(test_custom_metric_descriptor, capsys):
+    snippets.delete_metric_descriptor(test_custom_metric_descriptor)
+    out, _ = capsys.readouterr()
+    assert "Deleted metric" in out
+    assert test_custom_metric_descriptor in out
 
 
 def test_list_metric_descriptors(capsys):
     snippets.list_metric_descriptors(snippets.project_id())
     out, _ = capsys.readouterr()
-    assert 'logging.googleapis.com/byte_count' in out
+    assert "logging.googleapis.com/byte_count" in out
 
 
 def test_list_resources(capsys):
     snippets.list_monitored_resources(snippets.project_id())
     out, _ = capsys.readouterr()
-    assert 'pubsub_topic' in out
+    assert "pubsub_topic" in out
 
 
 def test_get_resources(capsys):
-    snippets.get_monitored_resource_descriptor(
-        snippets.project_id(), 'pubsub_topic')
+    snippets.get_monitored_resource_descriptor(snippets.project_id(), "pubsub_topic")
     out, _ = capsys.readouterr()
-    assert 'A topic in Google Cloud Pub/Sub' in out
+    assert "A topic in Google Cloud Pub/Sub" in out
 
 
-def test_time_series(capsys):
+def test_write_time_series(capsys):
     snippets.write_time_series(snippets.project_id())
+    out, _ = capsys.readouterr()
+    assert "Error" not in out  # this method returns nothing unless there is an error
+    # clean up custom metric created as part of quickstart
+    match = re.search(r"Metric to clean up (.*)\.", out)
+    metric_name = "projects/{}/metricDescriptors/{}".format(PROJECT_ID, match.group(1))
+    snippets.delete_metric_descriptor(metric_name)
+
 
+def test_list_time_series(capsys):
     snippets.list_time_series(snippets.project_id())
     out, _ = capsys.readouterr()
-    assert 'gce_instance' in out
+    assert "gce_instance" in out
+
 
+def test_list_time_series_header(capsys):
     snippets.list_time_series_header(snippets.project_id())
     out, _ = capsys.readouterr()
-    assert 'gce_instance' in out
+    assert "gce_instance" in out
+
 
+def test_list_time_series_aggregate(capsys):
     snippets.list_time_series_aggregate(snippets.project_id())
     out, _ = capsys.readouterr()
-    assert 'points' in out
-    assert 'interval' in out
-    assert 'start_time' in out
-    assert 'end_time' in out
+    assert "points" in out
+    assert "interval" in out
+    assert "start_time" in out
+    assert "end_time" in out
+
 
+def test_list_time_series_reduce(capsys):
     snippets.list_time_series_reduce(snippets.project_id())
     out, _ = capsys.readouterr()
-    assert 'points' in out
-    assert 'interval' in out
-    assert 'start_time' in out
-    assert 'end_time' in out
+    assert "points" in out
+    assert "interval" in out
+    assert "start_time" in out
+    assert "end_time" in out
diff --git a/monitoring/api/v3/uptime-check-client/snippets.py b/monitoring/api/v3/uptime-check-client/snippets.py
index 78c4a5f6394..e9888ff2bfa 100644
--- a/monitoring/api/v3/uptime-check-client/snippets.py
+++ b/monitoring/api/v3/uptime-check-client/snippets.py
@@ -23,14 +23,12 @@
 
 
 # [START monitoring_uptime_check_create]
-def create_uptime_check_config(project_name, host_name=None,
-                               display_name=None):
+def create_uptime_check_config(project_name, host_name=None, display_name=None):
     config = monitoring_v3.types.uptime_pb2.UptimeCheckConfig()
-    config.display_name = display_name or 'New uptime check'
-    config.monitored_resource.type = 'uptime_url'
-    config.monitored_resource.labels.update(
-        {'host': host_name or 'example.com'})
-    config.http_check.path = '/'
+    config.display_name = display_name or "New uptime check"
+    config.monitored_resource.type = "uptime_url"
+    config.monitored_resource.labels.update({"host": host_name or "example.com"})
+    config.http_check.path = "/"
     config.http_check.port = 80
     config.timeout.seconds = 10
     config.period.seconds = 300
@@ -39,22 +37,27 @@ def create_uptime_check_config(project_name, host_name=None,
     new_config = client.create_uptime_check_config(project_name, config)
     pprint.pprint(new_config)
     return new_config
+
+
 # [END monitoring_uptime_check_create]
 
 
 # [START monitoring_uptime_check_update]
-def update_uptime_check_config(config_name, new_display_name=None,
-                               new_http_check_path=None):
+def update_uptime_check_config(
+    config_name, new_display_name=None, new_http_check_path=None
+):
     client = monitoring_v3.UptimeCheckServiceClient()
     config = client.get_uptime_check_config(config_name)
     field_mask = monitoring_v3.types.FieldMask()
     if new_display_name:
-        field_mask.paths.append('display_name')
+        field_mask.paths.append("display_name")
         config.display_name = new_display_name
     if new_http_check_path:
-        field_mask.paths.append('http_check.path')
+        field_mask.paths.append("http_check.path")
         config.http_check.path = new_http_check_path
     client.update_uptime_check_config(config, field_mask)
+
+
 # [END monitoring_uptime_check_update]
@@ -65,6 +68,8 @@ def list_uptime_check_configs(project_name):
 
     for config in configs:
         pprint.pprint(config)
+
+
 # [END monitoring_uptime_check_list_configs]
@@ -72,10 +77,14 @@ def list_uptime_check_ips():
 
     client = monitoring_v3.UptimeCheckServiceClient()
     ips = client.list_uptime_check_ips()
-    print(tabulate.tabulate(
-        [(ip.region, ip.location, ip.ip_address) for ip in ips],
-        ('region', 'location', 'ip_address')
-    ))
+    print(
+        tabulate.tabulate(
+            [(ip.region, ip.location, ip.ip_address) for ip in ips],
+            ("region", "location", "ip_address"),
+        )
+    )
+
+
 # [END monitoring_uptime_check_list_ips]
@@ -84,6 +93,8 @@ def get_uptime_check_config(config_name):
     client = monitoring_v3.UptimeCheckServiceClient()
     config = client.get_uptime_check_config(config_name)
     pprint.pprint(config)
+
+
 # [END monitoring_uptime_check_get]
@@ -91,7 +102,9 @@ def get_uptime_check_config(config_name):
 def delete_uptime_check_config(config_name):
     client = monitoring_v3.UptimeCheckServiceClient()
     client.delete_uptime_check_config(config_name)
-    print('Deleted ', config_name)
+    print("Deleted ", config_name)
+
+
 # [END monitoring_uptime_check_delete]
@@ -108,106 +121,94 @@ def project_id():
     Returns:
         str -- the project name
     """
-    project_id = os.environ['GCLOUD_PROJECT']
project_id = os.environ["GCLOUD_PROJECT"] if not project_id: raise MissingProjectIdError( - 'Set the environment variable ' + - 'GCLOUD_PROJECT to your Google Cloud Project Id.') + "Set the environment variable " + + "GCLOUD_PROJECT to your Google Cloud Project Id." + ) return project_id def project_name(): - return 'projects/' + project_id() + return "projects/" + project_id() -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description='Demonstrates Uptime Check API operations.') + description="Demonstrates Uptime Check API operations." + ) - subparsers = parser.add_subparsers(dest='command') + subparsers = parser.add_subparsers(dest="command") list_uptime_check_configs_parser = subparsers.add_parser( - 'list-uptime-check-configs', - help=list_uptime_check_configs.__doc__ + "list-uptime-check-configs", help=list_uptime_check_configs.__doc__ ) list_uptime_check_ips_parser = subparsers.add_parser( - 'list-uptime-check-ips', - help=list_uptime_check_ips.__doc__ + "list-uptime-check-ips", help=list_uptime_check_ips.__doc__ ) create_uptime_check_config_parser = subparsers.add_parser( - 'create-uptime-check', - help=create_uptime_check_config.__doc__ + "create-uptime-check", help=create_uptime_check_config.__doc__ ) create_uptime_check_config_parser.add_argument( - '-d', '--display_name', - required=False, + "-d", "--display_name", required=False, ) create_uptime_check_config_parser.add_argument( - '-o', '--host_name', - required=False, + "-o", "--host_name", required=False, ) get_uptime_check_config_parser = subparsers.add_parser( - 'get-uptime-check-config', - help=get_uptime_check_config.__doc__ + "get-uptime-check-config", help=get_uptime_check_config.__doc__ ) get_uptime_check_config_parser.add_argument( - '-m', '--name', - required=True, + "-m", "--name", required=True, ) delete_uptime_check_config_parser = subparsers.add_parser( - 'delete-uptime-check-config', - help=delete_uptime_check_config.__doc__ + "delete-uptime-check-config", help=delete_uptime_check_config.__doc__ ) delete_uptime_check_config_parser.add_argument( - '-m', '--name', - required=True, + "-m", "--name", required=True, ) update_uptime_check_config_parser = subparsers.add_parser( - 'update-uptime-check-config', - help=update_uptime_check_config.__doc__ + "update-uptime-check-config", help=update_uptime_check_config.__doc__ ) update_uptime_check_config_parser.add_argument( - '-m', '--name', - required=True, + "-m", "--name", required=True, ) update_uptime_check_config_parser.add_argument( - '-d', '--display_name', - required=False, + "-d", "--display_name", required=False, ) update_uptime_check_config_parser.add_argument( - '-p', '--uptime_check_path', - required=False, + "-p", "--uptime_check_path", required=False, ) args = parser.parse_args() - if args.command == 'list-uptime-check-configs': + if args.command == "list-uptime-check-configs": list_uptime_check_configs(project_name()) - elif args.command == 'list-uptime-check-ips': + elif args.command == "list-uptime-check-ips": list_uptime_check_ips() - elif args.command == 'create-uptime-check': - create_uptime_check_config(project_name(), args.host_name, - args.display_name) + elif args.command == "create-uptime-check": + create_uptime_check_config(project_name(), args.host_name, args.display_name) - elif args.command == 'get-uptime-check-config': + elif args.command == "get-uptime-check-config": get_uptime_check_config(args.name) - elif args.command == 'delete-uptime-check-config': + elif args.command == 
"delete-uptime-check-config": delete_uptime_check_config(args.name) - elif args.command == 'update-uptime-check-config': + elif args.command == "update-uptime-check-config": if not args.display_name and not args.uptime_check_path: - print('Nothing to update. Pass --display_name or ' - '--uptime_check_path.') + print("Nothing to update. Pass --display_name or " "--uptime_check_path.") else: - update_uptime_check_config(args.name, args.display_name, - args.uptime_check_path) + update_uptime_check_config( + args.name, args.display_name, args.uptime_check_path + ) diff --git a/monitoring/api/v3/uptime-check-client/snippets_test.py b/monitoring/api/v3/uptime-check-client/snippets_test.py index 1411607c37c..9f9a66f47fb 100644 --- a/monitoring/api/v3/uptime-check-client/snippets_test.py +++ b/monitoring/api/v3/uptime-check-client/snippets_test.py @@ -23,8 +23,7 @@ def random_name(length): - return ''.join( - [random.choice(string.ascii_lowercase) for i in range(length)]) + return "".join([random.choice(string.ascii_lowercase) for i in range(length)]) class UptimeFixture: @@ -38,7 +37,8 @@ def __init__(self): def __enter__(self): # Create an uptime check config. self.config = snippets.create_uptime_check_config( - self.project_name, display_name=random_name(10)) + self.project_name, display_name=random_name(10) + ) return self def __exit__(self, type, value, traceback): @@ -46,7 +46,7 @@ def __exit__(self, type, value, traceback): snippets.delete_uptime_check_config(self.config.name) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def uptime(): with UptimeFixture() as uptime: yield uptime @@ -61,10 +61,11 @@ def test_create_and_delete(capsys): def test_update_uptime_config(capsys): # create and delete happen in uptime fixture. new_display_name = random_name(10) - new_uptime_check_path = '/' + random_name(10) + new_uptime_check_path = "/" + random_name(10) with UptimeFixture() as fixture: snippets.update_uptime_check_config( - fixture.config.name, new_display_name, new_uptime_check_path) + fixture.config.name, new_display_name, new_uptime_check_path + ) out, _ = capsys.readouterr() snippets.get_uptime_check_config(fixture.config.name) out, _ = capsys.readouterr() @@ -87,4 +88,4 @@ def test_list_uptime_check_configs(capsys, uptime): def test_list_uptime_check_ips(capsys): snippets.list_uptime_check_ips() out, _ = capsys.readouterr() - assert 'Singapore' in out + assert "Singapore" in out