in produce_set.go [130:218]
func (ps *produceSet) buildRequest() *ProduceRequest {
    req := &ProduceRequest{
        RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
        Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
    }
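    // Note: ProduceRequest.Timeout is expressed in milliseconds on the wire,
    // hence the conversion from time.Duration above.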
    if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
        req.Version = 2
    }
    if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
        req.Version = 3
        if ps.parent.IsTransactional() {
            req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID
        }
    }
    if ps.parent.conf.Version.IsAtLeast(V1_0_0_0) {
        req.Version = 5
    }
    if ps.parent.conf.Version.IsAtLeast(V2_0_0_0) {
        req.Version = 6
    }
    if ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
        req.Version = 7
    }
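    // The net effect of the checks above is a mapping from the configured
    // Kafka version to the ProduceRequest version sent on the wire:
    //   < 0.10.0  -> v0 (struct default)
    //   >= 0.10.0 -> v2
    //   >= 0.11.0 -> v3
    //   >= 1.0.0  -> v5
    //   >= 2.0.0  -> v6
    //   >= 2.1.0  -> v7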
    for topic, partitionSets := range ps.msgs {
        for partition, set := range partitionSets {
            if req.Version >= 3 {
                // If the API version we're hitting is 3 or greater, we need to calculate
                // offsets for each record in the batch relative to FirstOffset.
                // Additionally, we must set LastOffsetDelta to the value of the last offset
                // in the batch. Since the OffsetDelta of the first record is 0, we know that the
                // final record of any batch will have an offset of (# of records in batch) - 1.
                // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
                // under the RecordBatch section for details.)
                rb := set.recordsToSend.RecordBatch
                if len(rb.Records) > 0 {
                    rb.LastOffsetDelta = int32(len(rb.Records) - 1)
                    for i, record := range rb.Records {
                        record.OffsetDelta = int64(i)
                    }
                }
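                // For example, a batch of three records is sent with
                // OffsetDelta values 0, 1 and 2, and LastOffsetDelta = 2.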
                // Set the batch as transactional when a transactionalID is set
                rb.IsTransactional = ps.parent.IsTransactional()
                req.AddBatch(topic, partition, rb)
                continue
            }

            if ps.parent.conf.Producer.Compression == CompressionNone {
                req.AddSet(topic, partition, set.recordsToSend.MsgSet)
            } else {
                // When compression is enabled, the entire set for each partition is compressed
                // and sent as the payload of a single fake "message" with the appropriate codec
                // set and no key. When the server sees a message with a compression codec, it
                // decompresses the payload and treats the result as its message set.

                if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
                    // If our version is 0.10 or later, assign relative offsets
                    // to the inner messages. This lets the broker avoid
                    // recompressing the message set.
                    // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
                    // for details on relative offsets.)
                    for i, msg := range set.recordsToSend.MsgSet.Messages {
                        msg.Offset = int64(i)
                    }
                }
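                // For example, three inner messages carry relative offsets
                // 0, 1 and 2; per KIP-31 the broker can then assign absolute
                // offsets by rewriting only the wrapper message rather than
                // decompressing and recompressing the whole set.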
                payload, err := encode(set.recordsToSend.MsgSet, ps.parent.metricsRegistry)
                if err != nil {
                    Logger.Println(err) // if this happens, it's basically our fault.
                    panic(err)
                }
                compMsg := &Message{
                    Codec:            ps.parent.conf.Producer.Compression,
                    CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
                    Key:              nil,
                    Value:            payload,
                    Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
                }
                if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
                    compMsg.Version = 1
                    compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
                }
                req.AddMessage(topic, partition, compMsg)
            }
        }
    }

    return req
}
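
As a minimal, self-contained sketch of the relative-offset bookkeeping in the req.Version >= 3 branch above (the record and recordBatch types here are hypothetical stand-ins for illustration, not sarama's real Record/RecordBatch):

package main

import "fmt"

// Stripped-down stand-ins for sarama's Record and RecordBatch (hypothetical).
type record struct {
    OffsetDelta int64
}

type recordBatch struct {
    Records         []*record
    LastOffsetDelta int32
}

func main() {
    rb := &recordBatch{Records: []*record{{}, {}, {}}}

    // Same bookkeeping as buildRequest: offsets are relative to FirstOffset,
    // so record i gets delta i and the last record's delta is len(Records)-1.
    if len(rb.Records) > 0 {
        rb.LastOffsetDelta = int32(len(rb.Records) - 1)
        for i, r := range rb.Records {
            r.OffsetDelta = int64(i)
        }
    }

    for i, r := range rb.Records {
        fmt.Printf("record %d: OffsetDelta=%d\n", i, r.OffsetDelta)
    }
    fmt.Printf("LastOffsetDelta=%d\n", rb.LastOffsetDelta) // prints 2
}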