in src/olympia/addons/indexers.py [0:0]
@classmethod
def extract_document(cls, obj):
    """Extract indexable attributes from an add-on."""
    from olympia.addons.models import Preview
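    # Plain scalar model fields that are copied into the document verbatim.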
    attrs = (
        'id',
        'average_daily_users',
        'bayesian_rating',
        'contributions',
        'created',
        'default_locale',
        'guid',
        'hotness',
        'icon_hash',
        'icon_type',
        'is_disabled',
        'is_experimental',
        'last_updated',
        'modified',
        'requires_payment',
        'slug',
        'status',
        'type',
        'weekly_downloads',
    )
    data = {attr: getattr(obj, attr) for attr in attrs}
    data['colors'] = None
    # Extract dominant colors from static themes.
    if obj.type == amo.ADDON_STATICTHEME:
        if obj.current_previews:
            data['colors'] = obj.current_previews[0].colors
    data['app'] = [app.id for app in obj.compatible_apps.keys()]
    # Boost by the number of users, dampened to a sub-linear scale
    # (average_daily_users ** 0.2).
    data['boost'] = float(data['average_daily_users'] ** 0.2)
    # Quadruple the boost if the add-on is public.
    if (
        obj.status == amo.STATUS_APPROVED
        and not obj.is_experimental
        and 'boost' in data
    ):
        data['boost'] = float(max(data['boost'], 1) * 4)
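    # Example: an approved, non-experimental add-on with 100,000 average daily
    # users gets a boost of 100000 ** 0.2 (roughly 10.0), quadrupled to roughly 40.0.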
    # We can use all_categories because the indexing code goes through the
    # transformer that sets it.
    data['category'] = [cat.id for cat in obj.all_categories]
    data['current_version'] = cls.extract_version(obj, obj.current_version)
    data['listed_authors'] = [
        {'name': a.name, 'id': a.id, 'username': a.username}
        for a in obj.listed_authors
    ]
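    # Only the presence of an EULA / privacy policy is indexed, not the text.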
    data['has_eula'] = bool(obj.eula)
    data['has_privacy_policy'] = bool(obj.privacy_policy)
    data['is_recommended'] = any(
        PROMOTED_GROUP_CHOICES.RECOMMENDED == promotion.group_id
        for promotion in obj.publicly_promoted_groups
    )
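    # Preview metadata; captions are attached further down for non-theme
    # add-ons.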
    data['previews'] = [
        {
            'id': preview.id,
            'modified': preview.modified,
            'sizes': preview.sizes,
            'position': preview.position,
        }
        for preview in obj.current_previews
    ]
    data['promoted'] = [
        {
            'group_id': promotion.group_id,
            # Store the app approvals because .approved_applications needs it.
            'approved_for_apps': [
                app.id for app in obj.approved_applications_for(promotion)
            ],
        }
        for promotion in obj.publicly_promoted_groups
    ]
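    # Aggregate rating figures, read directly off the add-on object.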
    data['ratings'] = {
        'average': obj.average_rating,
        'count': obj.total_ratings,
        'text_count': obj.text_ratings_count,
    }
    # We can use tag_list because the indexing code goes through the
    # transformer that sets it (attach_tags).
    data['tags'] = getattr(obj, 'tag_list', [])
    # Handle localized fields.
    # First, deal with the 3 fields that need everything:
    for field in ('description', 'name', 'summary'):
        data.update(cls.extract_field_api_translations(obj, field))
        data.update(
            cls.extract_field_search_translation(obj, field, obj.default_locale)
        )
        data.update(cls.extract_field_analyzed_translations(obj, field))
    # Then add fields that only need to be returned to the API without
    # contributing to search relevancy.
    for field in ('developer_comments', 'homepage', 'support_email', 'support_url'):
        data.update(cls.extract_field_api_translations(obj, field))
    if obj.type != amo.ADDON_STATICTHEME:
        # Also do that for preview captions, which are set on each preview
        # object.
        attach_trans_dict(Preview, obj.current_previews)
        for i, preview in enumerate(obj.current_previews):
            data['previews'][i].update(
                cls.extract_field_api_translations(preview, 'caption')
            )
    return data
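
For context, a minimal sketch of how the extracted document might be fed into an Elasticsearch bulk request. It assumes this method lives on the module's AddonIndexer class and that an elasticsearch-py client is available; the `es` client, the 'addons' index name and the `index_addons` helper are hypothetical, and the add-ons passed in would need to be loaded through the indexing transformers so attributes like all_categories and current_previews are populated.

from elasticsearch import Elasticsearch, helpers

from olympia.addons.indexers import AddonIndexer

# Hypothetical client; point it at the real cluster in practice.
es = Elasticsearch('http://localhost:9200')


def index_addons(addons, index_name='addons'):
    """Bulk-index add-ons using extract_document() as the _source payload."""
    actions = (
        {
            '_index': index_name,
            '_id': addon.pk,
            '_source': AddonIndexer.extract_document(addon),
        }
        for addon in addons
    )
    helpers.bulk(es, actions)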