in blueprints/cloud-operations/compute-quota-monitoring/src/main.py [0:0]
def _main(monitoring_project, discovery_root=None, projects=None, regions=None,
include=None, exclude=None, dry_run=False, verbose=False):
"""Module entry point used by cli and cloud function wrappers."""
configure_logging(verbose=verbose)
  # default to the monitoring scope project if no projects are passed, then
  # merge in any projects discovered under the discovery root
regions = regions or ['global']
include = set(include or [])
exclude = set(exclude or [])
projects = projects or [monitoring_project]
  if discovery_root:
    projects = set(projects) | set(discover_projects(discovery_root))
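  # log the effective configuration at debug level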
for k in ('monitoring_project', 'projects', 'regions', 'include', 'exclude'):
logging.debug(f'{k} {locals().get(k)}')
timeseries = []
  logging.info(f'get quotas ({len(projects)} projects, {len(regions)} regions)')
for project in projects:
for region in regions:
logging.info(f'get quota for {project} in {region}')
for quota in get_quotas(project, region):
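        # apply the optional include/exclude prefix filters on the metric name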
metric = quota.metric.lower()
if include and not any(metric.startswith(k) for k in include):
logging.debug(f'skipping {project}:{region}:{metric} not included')
continue
if exclude and any(metric.startswith(k) for k in exclude):
logging.debug(f'skipping {project}:{region}:{metric} excluded')
continue
logging.debug(f'quota {project}:{region}:{metric}')
timeseries += list(quota.timeseries)
logging.info(f'{len(timeseries)} timeseries')
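  # send timeseries in batches of 30 to keep each write request small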
  count, total = 0, len(timeseries)
  for batch in batched(timeseries, 30):
    data = list(batch)
    logging.info(f'sending {len(data)} timeseries ({total - count}/{total} left)')
    count += len(data)
    if not dry_run:
      write_timeseries(monitoring_project, {'timeSeries': data})
    elif verbose:
      print(data)
  logging.info(f'{total} timeseries done (dry run {dry_run})')