in blueprints/cloud-operations/network-quota-monitoring/src/plugins/monitoring.py [0:0]
def timeseries_requests(project_id, root, timeseries, descriptors):
    """Yield HTTPRequest objects that write timeseries in batched API calls.

    Timeseries are bucketed by metric type and one element is drawn from each
    bucket per request, so a single request never contains two points for the
    same metric type (which the Monitoring API rejects). Requests are
    throttled to stay under the default write quota of 180 requests per
    minute per user.

    Args:
        project_id: project id used to build the timeseries create URL.
        root: prefix interpolated into DESCRIPTOR_TYPE_BASE to form the full
            metric type.
        timeseries: iterable of timeseries objects exposing `metric`,
            `labels` and `value` attributes.
        descriptors: iterable of descriptor objects exposing `type` and
            `is_ratio` attributes; every timeseries metric must have a
            matching descriptor or a KeyError is raised.

    Yields:
        HTTPRequest instances for the Monitoring timeseries create endpoint.
    """
    descriptor_valuetypes = {d.type: d.is_ratio for d in descriptors}
    # datetime.utcnow() is deprecated since Python 3.12; build the same
    # 'Z'-suffixed ISO timestamp from a timezone-aware UTC datetime
    end_time = datetime.datetime.now(
        datetime.timezone.utc).isoformat('T').replace('+00:00', 'Z')
    type_base = DESCRIPTOR_TYPE_BASE.format(root)
    url = TIMESERIES_URL.format(project_id)
    # group timeseries in buckets by their type so that multiple timeseries
    # can be grouped in a single API request without grouping duplicate types
    ts_buckets = collections.defaultdict(collections.deque)
    for ts in timeseries:
        ts_buckets[ts.metric].append(ts)
    LOGGER.info(f'metric types {list(ts_buckets.keys())}')
    ts_buckets = list(ts_buckets.values())
    api_calls, t = 0, time.time()
    while ts_buckets:
        data = {'timeSeries': []}
        # one element per bucket per request keeps metric types unique
        for bucket in ts_buckets:
            ts = bucket.popleft()
            # ratio metrics carry double values, all others 64-bit integers
            pv = ('doubleValue'
                  if descriptor_valuetypes[ts.metric] else 'int64Value')
            data['timeSeries'].append({
                'metric': {
                    'type': f'{type_base}{ts.metric}',
                    'labels': ts.labels
                },
                'resource': {
                    'type': 'global'
                },
                'points': [{
                    'interval': {
                        'endTime': end_time
                    },
                    'value': {
                        pv: ts.value
                    }
                }]
            })
        req_num = len(data['timeSeries'])
        tot_num = sum(len(b) for b in ts_buckets)
        LOGGER.info(f'sending {req_num} remaining: {tot_num}')
        yield HTTPRequest(url, HEADERS, json.dumps(data))
        api_calls += 1
        # Default quota is 180 request per minute per user
        if api_calls >= 170:
            td = time.time() - t
            if td < 60:
                LOGGER.info(
                    f'Pausing for {round(60 - td)}s to avoid monitoring quota issues')
                time.sleep(60 - td)
            # reset the window unconditionally: if the reset only happens
            # when a pause was needed, throttling stops tracking the quota
            # window once a full minute has elapsed between thresholds
            api_calls, t = 0, time.time()
        # drop buckets emptied during this round
        ts_buckets = [b for b in ts_buckets if b]