func (s *Store) MultiReadTx()

in lib/dsstore/datastore.go [213:291]


func (s *Store) MultiReadTx(datatype, realm, user, id string, filters [][]storage.Filter, offset, pageSize int, typ proto.Message, tx storage.Tx) (_ *storage.Results, ferr error) {
	ctx := context.Background() /* TODO: pass ctx from request */
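	// If the caller did not supply a transaction, open one here and close it on
	// return, surfacing any error from Finish through the named return ferr.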
	if tx == nil {
		var err error
		tx, err = s.Tx(false)
		if err != nil {
			return nil, err
		}
		defer func() {
			err := tx.Finish()
			if ferr == nil {
				ferr = err
			}
		}()
	}

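	// Cap the requested page size at the storage layer's maximum.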
	if pageSize > storage.MaxPageSize {
		pageSize = storage.MaxPageSize
	}

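	// Build a query for the latest revision of entities belonging to this
	// service and datatype; realm, user, and id are only constrained when they
	// are not the match-all sentinels.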
	q := datastore.NewQuery(entityKind).
		Filter("service =", s.service).
		Filter("type =", datatype)
	if realm != storage.AllRealms {
		q = q.Filter("realm =", realm)
	}
	if user != storage.MatchAllUsers {
		q = q.Filter("user_id =", user)
	}
	if id != storage.MatchAllIDs {
		q = q.Filter("id =", id)
	}
	q = q.Filter("rev =", storage.LatestRev).Order("id")
	if len(filters) == 0 {
		// No post-filtering, so apply the offset in the query itself as an optimization.
		// We still can't use q.Limit(pageSize) because the total number of matches is
		// needed for MatchCount.
		q = q.Offset(offset)
		offset = 0
	}

	results := storage.NewResults()
	it := s.client.Run(ctx, q)
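	// Walk the results, applying proto-level filters, the remaining offset, and
	// the page size in memory.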
	for {
		var e Entity
		_, err := it.Next(&e)
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		if len(e.Content) == 0 {
			continue
		}
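		// Decode the stored JSON content into a fresh clone of the prototype message.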
		p := proto.Clone(typ)
		if err := jsonpb.Unmarshal(strings.NewReader(e.Content), p); err != nil {
			return nil, err
		}
		if !storage.MatchProtoFilters(filters, p) {
			continue
		}
		// The offset cannot be pushed into q.Offset(x) when post-filters are present,
		// because only entries that pass the filters above should count toward it.
		// For pagination, decrement any remaining offset before accepting this entry.
		if offset > 0 {
			offset--
			continue
		}
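		// Append at most pageSize entries (pageSize == 0 imposes no cap), but keep
		// counting every match so MatchCount reflects the full result set.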
		if pageSize == 0 || pageSize > results.MatchCount {
			results.Entries = append(results.Entries, &storage.Entry{
				Realm:   realm,
				GroupID: e.User,
				ItemID:  e.ID,
				Item:    p,
			})
		}
		results.MatchCount++
	}
	return results, nil
}
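
For orientation, a minimal call-site sketch follows. It is not part of datastore.go: the listResources helper, the "resources" datatype string, and the pb.Resource message are assumed names for illustration; only the MultiReadTx signature and the storage sentinels come from the excerpt above.

// listResources is a hypothetical caller, shown only to illustrate the
// parameters of MultiReadTx; the datatype and message type are assumptions.
func listResources(store *Store) error {
	results, err := store.MultiReadTx(
		"resources",           // datatype (illustrative)
		storage.AllRealms,     // realm wildcard
		storage.MatchAllUsers, // user wildcard
		storage.MatchAllIDs,   // id wildcard
		nil,                   // filters: no post-filtering
		0,                     // offset
		50,                    // pageSize
		&pb.Resource{},        // typ: prototype cloned for each entry (assumed message)
		nil,                   // tx: nil lets the method open and finish its own
	)
	if err != nil {
		return err
	}
	for _, entry := range results.Entries {
		_ = entry.Item.(*pb.Resource) // each Item is a clone of the prototype
	}
	return nil
}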