in treeherder/etl/perf.py [0:0]
def _load_perf_datum(job: Job, perf_datum: dict):
    """Ingest one performance artifact payload for *job*.

    Validates *perf_datum*, then for each suite (and each of its subtests)
    creates or updates a ``PerformanceSignature`` and records a
    ``PerformanceDatum``. Newly created data points may queue async alert
    generation and, when eligible, have their raw replicates stored.

    Args:
        job: the Job the performance data belongs to.
        perf_datum: decoded performance artifact (must pass
            ``validate_perf_data``); expected keys include ``framework`` and
            ``suites`` — TODO confirm full schema against the validator.

    Returns:
        None. All effects are database writes and Celery task submissions.
        Payloads for unknown or disabled frameworks are skipped silently
        (with a log message, except for "job_resource_usage").
    """
    validate_perf_data(perf_datum)
    # NOTE: currently always empty; kept as the base the per-suite extra
    # properties are derived from.
    extra_properties = {}
    reference_data = {
        "option_collection_hash": job.signature.option_collection_hash,
        "machine_platform": job.signature.machine_platform,
    }
    option_collection = OptionCollection.objects.get(
        option_collection_hash=job.signature.option_collection_hash
    )
    try:
        framework = PerformanceFramework.objects.get(name=perf_datum["framework"]["name"])
    except PerformanceFramework.DoesNotExist:
        # "job_resource_usage" is deliberately never ingested; any other
        # unknown framework is logged so missing rows can be noticed.
        if perf_datum["framework"]["name"] == "job_resource_usage":
            return
        logger.warning(
            f"Performance framework {perf_datum['framework']['name']} does not exist, skipping load of performance artifacts"
        )
        return
    if not framework.enabled:
        logger.info(
            f"Performance framework {perf_datum['framework']['name']} is not enabled, skipping"
        )
        return
    application = _get_application_name(perf_datum)
    application_version = _get_application_version(perf_datum)
    for suite in perf_datum["suites"]:
        suite_extra_properties = copy.copy(extra_properties)
        ordered_tags = _order_and_concat(suite.get("tags", []))
        deduced_timestamp, is_multi_commit = _deduce_push_timestamp(perf_datum, job.push.time)
        suite_extra_options = ""
        if suite.get("extraOptions"):
            suite_extra_properties = {"test_options": sorted(suite["extraOptions"])}
            suite_extra_options = _order_and_concat(suite["extraOptions"])
        summary_signature_hash = None
        # the summary signature (fetched once per suite, reused for every
        # subtest below as the parent signature)
        summary_signature = None
        # if we have a summary value, create or get its signature by all its subtest
        # properties.
        if suite.get("value") is not None:
            # summary series
            summary_properties = {"suite": suite["name"]}
            summary_properties.update(reference_data)
            summary_properties.update(suite_extra_properties)
            summary_signature_hash = _get_signature_hash(summary_properties)
            signature = _create_or_update_signature(
                job.repository,
                summary_signature_hash,
                framework,
                application,
                {
                    "test": "",
                    "suite": suite["name"],
                    "suite_public_name": suite.get("publicName"),
                    "option_collection": option_collection,
                    "platform": job.machine_platform,
                    "tags": ordered_tags,
                    "extra_options": suite_extra_options,
                    "measurement_unit": suite.get("unit"),
                    "lower_is_better": suite.get("lowerIsBetter", True),
                    "has_subtests": True,
                    # these properties below can be either True, False, or null
                    # (None). Null indicates no preference has been set.
                    "should_alert": suite.get("shouldAlert"),
                    "alert_change_type": PerformanceSignature._get_alert_change_type(
                        suite.get("alertChangeType")
                    ),
                    "alert_threshold": suite.get("alertThreshold"),
                    "min_back_window": suite.get("minBackWindow"),
                    "max_back_window": suite.get("maxBackWindow"),
                    "fore_window": suite.get("foreWindow"),
                    "last_updated": job.push.time,
                },
            )
            (suite_datum, datum_created) = PerformanceDatum.objects.get_or_create(
                repository=job.repository,
                job=job,
                push=job.push,
                signature=signature,
                push_timestamp=deduced_timestamp,
                defaults={"value": suite["value"], "application_version": application_version},
            )
            if suite_datum.should_mark_as_multi_commit(is_multi_commit, datum_created):
                # keep a register with all multi commit perf data
                MultiCommitDatum.objects.create(perf_datum=suite_datum)
            if _suite_should_alert_based_on(signature, job, datum_created):
                generate_alerts.apply_async(args=[signature.id], queue="generate_perf_alerts")
            # hoisted out of the subtest loop: the row was just created above
            # with this exact repository/framework/hash/application, and the
            # lookup arguments never change within the suite.
            summary_signature = PerformanceSignature.objects.get(
                repository=job.repository,
                framework=framework,
                signature_hash=summary_signature_hash,
                application=application,
            )
        for subtest in suite["subtests"]:
            subtest_properties = {"suite": suite["name"], "test": subtest["name"]}
            subtest_properties.update(reference_data)
            subtest_properties.update(suite_extra_properties)
            if summary_signature_hash is not None:
                subtest_properties.update({"parent_signature": summary_signature_hash})
            subtest_signature_hash = _get_signature_hash(subtest_properties)
            signature = _create_or_update_signature(
                job.repository,
                subtest_signature_hash,
                framework,
                application,
                {
                    "test": subtest_properties["test"],
                    "suite": suite["name"],
                    "test_public_name": subtest.get("publicName"),
                    "suite_public_name": suite.get("publicName"),
                    "option_collection": option_collection,
                    "platform": job.machine_platform,
                    "tags": ordered_tags,
                    "extra_options": suite_extra_options,
                    "measurement_unit": subtest.get("unit"),
                    "lower_is_better": subtest.get("lowerIsBetter", True),
                    "has_subtests": False,
                    # these properties below can be either True, False, or
                    # null (None). Null indicates no preference has been
                    # set.
                    "should_alert": subtest.get("shouldAlert"),
                    "alert_change_type": PerformanceSignature._get_alert_change_type(
                        subtest.get("alertChangeType")
                    ),
                    "alert_threshold": subtest.get("alertThreshold"),
                    "min_back_window": subtest.get("minBackWindow"),
                    "max_back_window": subtest.get("maxBackWindow"),
                    "fore_window": subtest.get("foreWindow"),
                    "parent_signature": summary_signature,
                    "last_updated": job.push.time,
                },
            )
            # Use this subtest's own value directly. The previous code
            # rescanned all subtests for the first one sharing this name;
            # since duplicates hash to the same signature and defaults only
            # apply when the datum is first created, the stored value is
            # identical — without the O(n^2) rescan.
            (subtest_datum, datum_created) = PerformanceDatum.objects.get_or_create(
                repository=job.repository,
                job=job,
                push=job.push,
                signature=signature,
                push_timestamp=deduced_timestamp,
                defaults={
                    "value": subtest["value"],
                    "application_version": application_version,
                },
            )
            # bind once so the guard and the ingestion below agree (the old
            # code used .get() in the guard but ["replicates"] here, risking
            # a KeyError that was only masked by the broad except)
            replicates = subtest.get("replicates", [])
            if _test_should_gather_replicates_based_on(
                job.repository, suite["name"], replicates
            ):
                try:
                    # Add the replicates to the PerformanceDatumReplicate table, and
                    # catch and ignore any exceptions that are produced here so we don't
                    # impact the standard workflow
                    PerformanceDatumReplicate.objects.bulk_create(
                        [
                            PerformanceDatumReplicate(
                                value=replicate, performance_datum=subtest_datum
                            )
                            for replicate in replicates
                        ]
                    )
                except Exception as e:
                    logger.info(f"Failed to ingest replicates for datum {subtest_datum}: {e}")
            if subtest_datum.should_mark_as_multi_commit(is_multi_commit, datum_created):
                # keep a register with all multi commit perf data
                MultiCommitDatum.objects.create(perf_datum=subtest_datum)
            if _test_should_alert_based_on(signature, job, datum_created, suite):
                generate_alerts.apply_async(args=[signature.id], queue="generate_perf_alerts")