# search_app() — from Allura/allura/lib/search.py

def search_app(q='', fq=None, app=True, **kw):
    """Helper for app/project search.

    Uses dismax query parser. Matches on `title` and `text`. Handles paging, sorting, etc

    :param q: solr query string; empty/falsy means no search is run.
    :param fq: optional list of solr filter-query clauses (not mutated
        beyond the copies made below).
    :param app: when True, results are scoped to the current app
        (``c.app``) and a ``project=True`` kwarg triggers a redirect to
        the project-wide search page.
    :param kw: optional knobs, all popped off before use:
        ``history``, ``project``, ``search_comments``, ``limit``,
        ``page``, ``default``, ``allowed_types``, ``parser``, ``sort``.
    :returns: dict of template variables: ``q``, ``history``,
        ``results``, ``count``, ``limit``, ``page``, ``search_error``,
        ``sort_score_url``, ``sort_date_url``, ``sort_field``.
    """
    from allura.model import ArtifactReference
    from allura.lib.security import has_access

    history = kw.pop('history', None)
    if app and kw.pop('project', False):
        # Used from app's search controller. If `project` is True, redirect to
        # 'entire project search' page
        redirect(c.project.url() + 'search/?' +
                 urlencode(dict(q=q, history=history)))
    search_comments = kw.pop('search_comments', None)
    limit = kw.pop('limit', None)
    page = kw.pop('page', 0)
    default = kw.pop('default', 25)
    allowed_types = kw.pop('allowed_types', [])
    parser = kw.pop('parser', None)
    sort = kw.pop('sort', 'score desc')
    fq = fq if fq else []
    search_error = None
    results = []
    count = 0
    matches = {}
    limit, page, start = g.handle_paging(limit, page, default=default)
    if not q:
        q = ''
    else:
        # Match on both `title` and `text` by default, using 'dismax' parser.
        # Score on `title` matches is boosted, so title match is better than body match.
        # It's 'fuzzier' than standard parser, which matches only on `text`.
        if search_comments:
            # Build a new list rather than `+=` so a caller-supplied
            # `allowed_types` list is never mutated in place.
            allowed_types = list(allowed_types) + ['Post']
        if app:
            fq = [
                'project_id_s:%s' % c.project._id,
                'mount_point_s:%s' % c.app.config.options.mount_point,
                '-deleted_b:true',
                'type_s:(%s)' % ' OR '.join(
                    ['"%s"' % t for t in allowed_types])
            ] + fq
        search_params = {
            'qt': 'dismax',
            'qf': 'title^2 text',
            'pf': 'title^2 text',
            'fq': fq,
            'hl': 'true',
            'hl.simple.pre': '#ALLURA-HIGHLIGHT-START#',
            'hl.simple.post': '#ALLURA-HIGHLIGHT-END#',
            'sort': sort,
        }
        if not history:
            search_params['fq'].append('is_history_b:False')
        if parser == 'standard':
            # Fall back to solr's standard query parser: strip the
            # dismax-specific knobs and let solr use its default qt.
            search_params.pop('qt', None)
            search_params.pop('qf', None)
            search_params.pop('pf', None)
        try:
            results = search(
                q, short_timeout=True, ignore_errors=False,
                rows=limit, start=start, **search_params)
        except SearchError as e:
            # Surface the error to the template instead of 500-ing.
            search_error = e
        if results:
            count = results.hits
            matches = results.highlighting

            def historize_urls(doc):
                # Snapshot docs need a ?version= query arg so the link
                # shows the historical revision, not the current one.
                if doc.get('type_s', '').endswith(' Snapshot'):
                    if doc.get('url_s'):
                        doc['url_s'] = doc['url_s'] + \
                            '?version=%s' % doc.get('version_i')
                return doc

            def add_matches(doc):
                # Attach escaped highlight snippets; the private marker
                # strings are swapped for <strong> tags AFTER escaping so
                # only our own markup survives as HTML.
                m = matches.get(doc['id'], {})
                title = h.get_first(m, 'title')
                text = h.get_first(m, 'text')
                if title:
                    title = (markupsafe.escape(title)
                             .replace('#ALLURA-HIGHLIGHT-START#', markupsafe.Markup('<strong>'))
                             .replace('#ALLURA-HIGHLIGHT-END#', markupsafe.Markup('</strong>')))
                if text:
                    text = (markupsafe.escape(text)
                            .replace('#ALLURA-HIGHLIGHT-START#', markupsafe.Markup('<strong>'))
                            .replace('#ALLURA-HIGHLIGHT-END#', markupsafe.Markup('</strong>')))
                doc['title_match'] = title
                # Fall back to the doc's own text when solr returned no
                # highlight snippet for the body.
                doc['text_match'] = text or h.get_first(doc, 'text')
                return doc

            def paginate_comment_urls(doc):
                # Posts live inside paginated discussion threads; link to
                # the right page of the thread, not just the thread root.
                if doc.get('type_s', '') == 'Post':
                    artifact = doc['_artifact']
                    if artifact:
                        doc['url_paginated'] = artifact.url_paginated()
                return doc

            def filter_unauthorized(doc):
                aref = ArtifactReference.query.get(_id=doc.get('id'))
                # cache for paginate_comment_urls to re-use
                doc['_artifact'] = aref and aref.artifact
                # .primary() necessary so that a ticket's comment for example is checked with the ticket's perms
                if doc['_artifact'] and not has_access(doc['_artifact'].primary(), 'read', c.user):
                    return None
                else:
                    return doc

            filtered_results = [_f for _f in map(filter_unauthorized, results) if _f]
            # Adjust the total hit count by however many docs on this
            # page were hidden from the current user.
            count -= len(results) - len(filtered_results)
            results = filtered_results
            results = map(historize_urls, results)
            results = map(add_matches, results)
            results = map(paginate_comment_urls, results)

    # Provide sort urls to the view.  The URL for the *current* sort
    # field carries the opposite order, so clicking it toggles asc/desc.
    score_url = 'score desc'
    date_url = 'mod_date_dt desc'
    try:
        field, order = sort.split(' ')
    except ValueError:
        # Malformed sort param — fall back to the default ordering.
        field, order = 'score', 'desc'
    sort = ' '.join([field, 'asc' if order == 'desc' else 'desc'])
    if field == 'score':
        score_url = sort
    elif field == 'mod_date_dt':
        date_url = sort
    params = request.GET.copy()
    params.update({'sort': score_url})
    score_url = url(request.path, params=params)
    params.update({'sort': date_url})
    date_url = url(request.path, params=params)
    return dict(q=q, history=history, results=list(results) or [],
                count=count, limit=limit, page=page, search_error=search_error,
                sort_score_url=score_url, sort_date_url=date_url,
                sort_field=field)