in internal/pkg/bulk/opSearch.go [123:240]
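// flushSearch drains one queued batch of search requests: it concatenates the
// buffered msearch bodies into a single request, sends it (via the Fleet
// msearch API when queue.ty == kQueueFleetSearch, otherwise the standard
// msearch API), and fans the individual responses back out to the callers
// waiting on each queued item's channel.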
func (b *Bulker) flushSearch(ctx context.Context, queue queueT) error {
	start := time.Now()

	const kRoughEstimatePerItem = 256

	bufSz := queue.cnt * kRoughEstimatePerItem
	if bufSz < queue.pending {
		bufSz = queue.pending
	}

	var buf bytes.Buffer
	buf.Grow(bufSz)

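	// Concatenate the pre-serialized request bodies from the queued items and
	// collect any APM span links so the flush span can reference the
	// operations that enqueued the work.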
	queueCnt := 0
	links := []apm.SpanLink{}
	for n := queue.head; n != nil; n = n.next {
		buf.Write(n.buf.Bytes())
		queueCnt++
		if n.spanLink != nil {
			links = append(links, *n.spanLink)
		}
	}
	if len(links) == 0 {
		links = nil
	}

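	// Start an APM span for this flush, linked back to the queued operations.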
	span, ctx := apm.StartSpanOptions(ctx, "Flush: search", "search", apm.SpanOptions{
		Links: links,
	})
	defer span.End()

	// Do the actual msearch request; per-item responses are sent back on the
	// item channels below.
	var (
		res *esapi.Response
		err error
	)
	if queue.ty == kQueueFleetSearch {
		req := esapi.FleetMsearchRequest{
			Body: bytes.NewReader(buf.Bytes()),
		}
		res, err = req.Do(ctx, b.es)
	} else {
		req := esapi.MsearchRequest{
			Body: bytes.NewReader(buf.Bytes()),
		}
		res, err = req.Do(ctx, b.es)
	}

	if err != nil {
		return err
	}

	if res.Body != nil {
		defer res.Body.Close()
	}

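	// An error-level HTTP response fails the whole flush.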
	if res.IsError() {
		zerolog.Ctx(ctx).Warn().Str("mod", kModBulk).Str("error.message", res.String()).Msg("bulker.flushSearch: Fail msearch request")
		return parseError(res, zerolog.Ctx(ctx))
	}

	// Reuse buffer
	buf.Reset()

	bodySz, err := buf.ReadFrom(res.Body)
	if err != nil {
		zerolog.Ctx(ctx).Error().Err(err).Str("mod", kModBulk).Msg("MsearchResponse error")
		return err
	}

	// prealloc slice
	var blk MsearchResponse
	blk.Responses = make([]MsearchResponseItem, 0, queueCnt)

	if err = easyjson.Unmarshal(buf.Bytes(), &blk); err != nil {
		zerolog.Ctx(ctx).Error().Err(err).Str("mod", kModBulk).Msg("Unmarshal error")
		return err
	}

	zerolog.Ctx(ctx).Trace().
		Err(err).
		Str("mod", kModBulk).
		Dur("rtt", time.Since(start)).
		Int("took", blk.Took).
		Int("cnt", len(blk.Responses)).
		Int("bufSz", bufSz).
		Int64("bodySz", bodySz).
		Msg("flushSearch")

	if len(blk.Responses) != queueCnt {
		return fmt.Errorf("flushSearch: expected %d responses, got %d", queueCnt, len(blk.Responses))
	}

	// WARNING: Once we start pushing items to
	// the queue, the node pointers are invalid.
	// Do NOT return a non-nil value or failQueue
	// up the stack will fail.
	n := queue.head
	for i := range blk.Responses {
		next := n.next // 'n' is invalid immediately on channel send

		response := &blk.Responses[i]

		select {
		case n.ch <- respT{
			err:  response.deriveError(),
			idx:  n.idx,
			data: response,
		}:
		default:
			panic("Unexpected blocked response channel on flushSearch")
		}

		n = next
	}

	return nil
}