in pontoon/insights/utils.py [0:0]
def get_insights(locale=None, project=None):
    """Get data required by the Insights tab.

    :param locale: Optional Locale; restrict snapshots to this locale.
    :param project: Optional Project; restrict snapshots to this project.
    :return: Dict with ``dates``, ``translation_activity``,
        ``review_activity`` and ``pretranslation_quality`` chart data,
        aggregated per month since the insights start date.
    """
    start_date = get_insight_start_date()
    snapshots = ProjectLocaleInsightsSnapshot.objects.filter(created_at__gte=start_date)
    if locale:
        snapshots = snapshots.filter(project_locale__locale=locale)
    if project:
        snapshots = snapshots.filter(project_locale__project=project)
    # Materialize the queryset once: the result is iterated many times below,
    # and a lazy QuerySet would re-run the aggregation query on every pass.
    insights = list(
        snapshots
        # Truncate to month and add to select list
        .annotate(month=TruncMonth("created_at"))
        # Group By month
        .values("month")
        # Select the avg/sum of the grouping
        .annotate(snapshots_count=Count("created_at", distinct=True))
        .annotate(locales_count=Count("project_locale__locale", distinct=True))
        .annotate(completion_avg=Avg("completion"))
        .annotate(human_translations_sum=Sum("human_translations"))
        .annotate(machinery_sum=Sum("machinery_translations"))
        .annotate(new_source_strings_sum=Sum("new_source_strings"))
        .annotate(unreviewed_sum=Sum("unreviewed_strings"))
        .annotate(peer_approved_sum=Sum("peer_approved"))
        .annotate(self_approved_sum=Sum("self_approved"))
        .annotate(rejected_sum=Sum("rejected"))
        .annotate(new_suggestions_sum=Sum("new_suggestions"))
        .annotate(
            pretranslations_chrf_score_avg=Avg(
                "pretranslations_chrf_score",
                filter=Q(pretranslations_chrf_score__isnull=False),
            )
        )
        .annotate(pretranslations_approved_sum=Sum("pretranslations_approved"))
        .annotate(pretranslations_rejected_sum=Sum("pretranslations_rejected"))
        .annotate(pretranslations_new_sum=Sum("pretranslations_new"))
        # Select month and values
        .values(
            "month",
            "snapshots_count",
            "locales_count",
            "completion_avg",
            "human_translations_sum",
            "machinery_sum",
            "new_source_strings_sum",
            "unreviewed_sum",
            "peer_approved_sum",
            "self_approved_sum",
            "rejected_sum",
            "new_suggestions_sum",
            "pretranslations_chrf_score_avg",
            "pretranslations_approved_sum",
            "pretranslations_rejected_sum",
            "pretranslations_new_sum",
        )
        .order_by("month")
    )

    def _per_unit(total, count):
        # Normalize a monthly sum by a (possibly zero) count; a zero count
        # falls back to 1 to avoid ZeroDivisionError.
        return int(round(total / (count or 1)))

    return {
        "dates": [convert_to_unix_time(x["month"]) for x in insights],
        "translation_activity": {
            "completion": [round(x["completion_avg"], 2) for x in insights],
            "human_translations": [x["human_translations_sum"] for x in insights],
            "machinery_translations": [x["machinery_sum"] for x in insights],
            # The same new source strings are added to each locale, so they need to be normalised
            "new_source_strings": [
                _per_unit(x["new_source_strings_sum"], x["locales_count"])
                for x in insights
            ],
        },
        "review_activity": {
            # Unreviewed is not a delta, so use an average for the whole month
            "unreviewed": [
                _per_unit(x["unreviewed_sum"], x["snapshots_count"])
                for x in insights
            ],
            "peer_approved": [x["peer_approved_sum"] for x in insights],
            "self_approved": [x["self_approved_sum"] for x in insights],
            "rejected": [x["rejected_sum"] for x in insights],
            "new_suggestions": [x["new_suggestions_sum"] for x in insights],
        },
        "pretranslation_quality": {
            "approval_rate": json.dumps([get_approval_rate(x) for x in insights]),
            "chrf_score": json.dumps([get_chrf_score(x) for x in insights]),
            "approved": [x["pretranslations_approved_sum"] for x in insights],
            "rejected": [x["pretranslations_rejected_sum"] for x in insights],
            "new": [x["pretranslations_new_sum"] for x in insights],
        },
    }