package sarama

import "github.com/rcrowley/go-metrics"

// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
// it must see before responding. Any of the constants defined here are valid. On broker versions
// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
// by setting the `min.insync.replicas` value in the broker's configuration).
type RequiredAcks int16

const (
	// NoResponse doesn't send any response, the TCP ACK is all you get.
	NoResponse RequiredAcks = 0
	// WaitForLocal waits for only the local commit to succeed before responding.
	WaitForLocal RequiredAcks = 1
	// WaitForAll waits for all in-sync replicas to commit before responding.
	// The minimum number of in-sync replicas is configured on the broker via
	// the `min.insync.replicas` configuration key.
	WaitForAll RequiredAcks = -1
)
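
// A minimal usage sketch (hypothetical values; Timeout is in milliseconds,
// matching the produce request's timeout field):
//
//	req := &ProduceRequest{
//		RequiredAcks: WaitForAll, // strongest durability guarantee
//		Timeout:      10000,
//	}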

type ProduceRequest struct {
	TransactionalID *string
	RequiredAcks    RequiredAcks
	Timeout         int32
	Version         int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
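	// records maps topic name to partition ID to the records bound for that
	// partition: a legacy MessageSet, or a RecordBatch for Version >= 3.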
	records map[string]map[int32]Records
}

func (r *ProduceRequest) setVersion(v int16) {
	r.Version = v
}

func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
	topicCompressionRatioMetric metrics.Histogram,
) int64 {
	var topicRecordCount int64
	for _, messageBlock := range msgSet.Messages {
		// Is this a fake "message" wrapping real messages?
		if messageBlock.Msg.Set != nil {
			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
		} else {
			// A single uncompressed message
			topicRecordCount++
		}
		// Better safe than sorry when computing the compression ratio
		if messageBlock.Msg.compressedSize != 0 {
			compressionRatio := float64(len(messageBlock.Msg.Value)) /
				float64(messageBlock.Msg.compressedSize)
			// Histograms do not support decimal values, so multiply by 100 for better precision
			intCompressionRatio := int64(100 * compressionRatio)
			compressionRatioMetric.Update(intCompressionRatio)
			topicCompressionRatioMetric.Update(intCompressionRatio)
		}
	}
	return topicRecordCount
}

func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
	topicCompressionRatioMetric metrics.Histogram,
) int64 {
	if recordBatch.compressedRecords != nil {
		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
		compressionRatioMetric.Update(compressionRatio)
		topicCompressionRatioMetric.Update(compressionRatio)
	}

	return int64(len(recordBatch.Records))
}
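
// Worked example (illustrative numbers): 1000 bytes of records that compress
// to 400 bytes are recorded as int64(1000.0/400.0*100) = 250, i.e. a 2.5x
// ratio, matching the x100 scaling used for message sets above.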

func (r *ProduceRequest) encode(pe packetEncoder) error {
	if r.Version >= 3 {
		if err := pe.putNullableString(r.TransactionalID); err != nil {
			return err
		}
	}
	pe.putInt16(int16(r.RequiredAcks))
	pe.putInt32(r.Timeout)
	metricRegistry := pe.metricRegistry()
	var batchSizeMetric metrics.Histogram
	var compressionRatioMetric metrics.Histogram
	if metricRegistry != nil {
		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
	}
	totalRecordCount := int64(0)

	err := pe.putArrayLength(len(r.records))
	if err != nil {
		return err
	}

	for topic, partitions := range r.records {
		err = pe.putString(topic)
		if err != nil {
			return err
		}
		err = pe.putArrayLength(len(partitions))
		if err != nil {
			return err
		}
		topicRecordCount := int64(0)
		var topicCompressionRatioMetric metrics.Histogram
		if metricRegistry != nil {
			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
		}
		for id, records := range partitions {
			startOffset := pe.offset()
			pe.putInt32(id)
			pe.push(&lengthField{})
			err = records.encode(pe)
			if err != nil {
				return err
			}
			err = pe.pop()
			if err != nil {
				return err
			}
			if metricRegistry != nil {
				if r.Version >= 3 {
					topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
				} else {
					topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
				}
				batchSize := int64(pe.offset() - startOffset)
				batchSizeMetric.Update(batchSize)
				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
			}
		}
		if topicRecordCount > 0 {
			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
			totalRecordCount += topicRecordCount
		}
	}
	if totalRecordCount > 0 {
		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
	}

	return nil
}

func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
	r.Version = version

	if version >= 3 {
		id, err := pd.getNullableString()
		if err != nil {
			return err
		}
		r.TransactionalID = id
	}
	requiredAcks, err := pd.getInt16()
	if err != nil {
		return err
	}
	r.RequiredAcks = RequiredAcks(requiredAcks)
	if r.Timeout, err = pd.getInt32(); err != nil {
		return err
	}
	topicCount, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if topicCount == 0 {
		return nil
	}

	r.records = make(map[string]map[int32]Records)
	for i := 0; i < topicCount; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		partitionCount, err := pd.getArrayLength()
		if err != nil {
			return err
		}
		r.records[topic] = make(map[int32]Records)

		for j := 0; j < partitionCount; j++ {
			partition, err := pd.getInt32()
			if err != nil {
				return err
			}
			size, err := pd.getInt32()
			if err != nil {
				return err
			}
			recordsDecoder, err := pd.getSubset(int(size))
			if err != nil {
				return err
			}
			var records Records
			if err := records.decode(recordsDecoder); err != nil {
				return err
			}
			r.records[topic][partition] = records
		}
	}

	return nil
}

func (r *ProduceRequest) key() int16 {
	return apiKeyProduce
}

func (r *ProduceRequest) version() int16 {
	return r.Version
}

func (r *ProduceRequest) headerVersion() int16 {
	return 1
}

func (r *ProduceRequest) isValidVersion() bool {
	return r.Version >= 0 && r.Version <= 7
}

func (r *ProduceRequest) requiredVersion() KafkaVersion {
	switch r.Version {
	case 7:
		return V2_1_0_0
	case 6:
		return V2_0_0_0
	case 4, 5:
		return V1_0_0_0
	case 3:
		return V0_11_0_0
	case 2:
		return V0_10_0_0
	case 1:
		return V0_9_0_0
	case 0:
		return V0_8_2_0
	default:
		return V2_1_0_0
	}
}

func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
	if r.records == nil {
		r.records = make(map[string]map[int32]Records)
	}

	if r.records[topic] == nil {
		r.records[topic] = make(map[int32]Records)
	}
}

func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
	r.ensureRecords(topic, partition)
	set := r.records[topic][partition].MsgSet

	if set == nil {
		set = new(MessageSet)
		r.records[topic][partition] = newLegacyRecords(set)
	}

	set.addMessage(msg)
}

func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
	r.ensureRecords(topic, partition)
	r.records[topic][partition] = newLegacyRecords(set)
}

func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
	r.ensureRecords(topic, partition)
	r.records[topic][partition] = newDefaultRecords(batch)
}
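
// Usage sketch (hypothetical topic and values): legacy requests accumulate
// messages via AddMessage or AddSet, while Version >= 3 requests carry a
// RecordBatch:
//
//	req := &ProduceRequest{Version: 3, RequiredAcks: WaitForAll, Timeout: 10000}
//	req.AddBatch("my-topic", 0, &RecordBatch{
//		Version: 2,
//		Records: []*Record{{Value: []byte("hello")}},
//	})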