Class: Fluent::Kafka2Output

Inherits:
Output
  • Object
Includes:
Fluent::KafkaPluginUtil::SSLSettings, Fluent::KafkaPluginUtil::SaslSettings
Defined in:
lib/fluent/plugin/out_kafka2.rb

Constant Summary

Constants included from Fluent::KafkaPluginUtil::SSLSettings

Fluent::KafkaPluginUtil::SSLSettings::DummyFormatter

Instance Method Summary

Methods included from Fluent::KafkaPluginUtil::SaslSettings

included

Methods included from Fluent::KafkaPluginUtil::SSLSettings

included, #pickup_ssl_endpoint, #read_ssl_file

Constructor Details

#initialize ⇒ Kafka2Output

Returns a new instance of Kafka2Output.



# File 'lib/fluent/plugin/out_kafka2.rb', line 101

def initialize
  super

  @kafka = nil
  @producers = nil
  @producers_mutex = nil
  @shared_producer = nil

  @writing_threads_mutex = Mutex.new
  @writing_threads = Set.new
end

Instance Method Details

#close ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 225

def close
  super
  @kafka.close if @kafka
end

#configure(conf) ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 145

def configure(conf)
  super

  if @brokers.size > 0
    @seed_brokers = @brokers
    log.info "brokers has been set: #{@seed_brokers}"
  else
    raise Fluent::ConfigError, 'No brokers specified. At least one broker is required.'
  end

  formatter_conf = conf.elements('format').first
  unless formatter_conf
    raise Fluent::ConfigError, "<format> section is required."
  end
  unless formatter_conf["@type"]
    raise Fluent::ConfigError, "format/@type is required."
  end
  @formatter_proc = setup_formatter(formatter_conf)

  if @default_topic.nil?
    if @use_default_for_unknown_topic
      raise Fluent::ConfigError, "default_topic must be set when use_default_for_unknown_topic is true"
    end
    if @chunk_keys.include?('topic') && !@chunk_key_tag
      log.warn "Use 'topic' field of event record for topic but no fallback. Recommend to set default_topic or set 'tag' in buffer chunk keys like <buffer topic,tag>"
    end
  else
    if @chunk_key_tag
      log.warn "default_topic is set. Fluentd's event tag is not used for topic"
    end
  end

  @producer_opts = {max_retries: @max_send_retries, required_acks: @required_acks, idempotent: @idempotent}
  @producer_opts[:ack_timeout] = @ack_timeout if @ack_timeout
  @producer_opts[:compression_codec] = @compression_codec.to_sym if @compression_codec
  if @active_support_notification_regex
    require 'active_support/notifications'
    require 'active_support/core_ext/hash/keys'
    ActiveSupport::Notifications.subscribe(Regexp.new(@active_support_notification_regex)) do |*args|
      event = ActiveSupport::Notifications::Event.new(*args)
      message = event.payload.respond_to?(:stringify_keys) ? event.payload.stringify_keys : event.payload
      @router.emit("fluent_kafka_stats.#{event.name}", Time.now.to_i, message)
    end
  end

  @topic_key_sym = @topic_key.to_sym

  @headers_from_record_accessors = {}
  @headers_from_record.each do |key, value|
    @headers_from_record_accessors[key] = record_accessor_create(value)
  end

  @exclude_field_accessors = @exclude_fields.map do |field|
    record_accessor_create(field)
  end

  @record_field_accessor = nil
  @record_field_accessor = record_accessor_create(@record_key) unless @record_key.nil?
end
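
As a quick illustration of the checks above: configure raises Fluent::ConfigError unless at least one broker and a <format> section with @type are given. A minimal sketch using Fluentd's standard output test driver (broker address and topic are hypothetical):

require 'fluent/test'
require 'fluent/test/driver/output'
require 'fluent/plugin/out_kafka2'

# Hypothetical minimal configuration that satisfies configure's checks.
conf = %[
  brokers localhost:9092
  default_topic logs
  <format>
    @type json
  </format>
]

driver = Fluent::Test::Driver::Output.new(Fluent::Kafka2Output)
driver.configure(conf) # raises Fluent::ConfigError if brokers or <format> is missing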

#create_producer ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 209

def create_producer
  @kafka.custom_producer(**@producer_opts)
end
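
Note that custom_producer is not a stock ruby-kafka API; it is defined on the Kafka client by this plugin's producer extension (kafka_producer_ext.rb). For orientation, a sketch of the options it receives, as assembled in #configure (values shown are hypothetical):

# Hypothetical shape of @producer_opts; see #configure above.
producer_opts = {
  max_retries: 2,      # from max_send_retries
  required_acks: -1,   # from required_acks
  idempotent: false,   # from idempotent
}
producer_opts[:ack_timeout] = 5            # only when ack_timeout is configured
producer_opts[:compression_codec] = :gzip  # only when compression_codec is configured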

#get_producer ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 270

def get_producer
  if @share_producer
    @shared_producer
  else
    @producers_mutex.synchronize {
      producer = @producers[Thread.current.object_id]
      unless producer
        producer = create_producer
        @producers[Thread.current.object_id] = producer
      end
      producer
    }
  end
end

#multi_workers_ready? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/fluent/plugin/out_kafka2.rb', line 205

def multi_workers_ready?
  true
end

#refresh_client(raise_error = true) ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 113

def refresh_client(raise_error = true)
  begin
    logger = @get_kafka_client_log ? log : nil
    if @scram_mechanism != nil && @username != nil && @password != nil
      @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
                         ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
                         ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_scram_username: @username, sasl_scram_password: @password,
                         sasl_scram_mechanism: @scram_mechanism, sasl_over_ssl: @sasl_over_ssl, ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
                         partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
    elsif @username != nil && @password != nil
      @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
                         ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
                         ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_plain_username: @username, sasl_plain_password: @password, sasl_over_ssl: @sasl_over_ssl,
                         ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
                         partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
    else
      @kafka = Kafka.new(seed_brokers: @seed_brokers, client_id: @client_id, logger: logger, connect_timeout: @connect_timeout, socket_timeout: @socket_timeout, ssl_ca_cert_file_path: @ssl_ca_cert,
                         ssl_client_cert: read_ssl_file(@ssl_client_cert), ssl_client_cert_key: read_ssl_file(@ssl_client_cert_key), ssl_client_cert_chain: read_ssl_file(@ssl_client_cert_chain),
                         ssl_ca_certs_from_system: @ssl_ca_certs_from_system, sasl_gssapi_principal: @principal, sasl_gssapi_keytab: @keytab, sasl_over_ssl: @sasl_over_ssl,
                         ssl_verify_hostname: @ssl_verify_hostname, resolve_seed_brokers: @resolve_seed_brokers,
                         partitioner: Kafka::Partitioner.new(hash_function: @partitioner_hash_function))
    end
    log.info "initialized kafka producer: #{@client_id}"
  rescue Exception => e
    if raise_error # During startup, errors should be reported to the engine so it can halt the startup phase safely.
      raise e
    else
      log.error e
    end
  end
end
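
The three branches above pick the SASL mechanism from the configuration: SCRAM when scram_mechanism, username, and password are all set; PLAIN when only username and password are set; GSSAPI (principal/keytab) otherwise. A hypothetical configuration that takes the SCRAM branch (all values are examples):

# Hypothetical settings that select the SCRAM branch in refresh_client.
conf = %[
  brokers broker1:9093
  username alice
  password secret
  scram_mechanism sha256
  ssl_ca_cert /path/to/ca.crt
  <format>
    @type json
  </format>
]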

#setup_formatter(conf) ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 285

def setup_formatter(conf)
  type = conf['@type']
  case type
  when 'json'
    begin
      require 'oj'
      Oj.default_options = Fluent::DEFAULT_OJ_OPTIONS
      Proc.new { |tag, time, record| Oj.dump(record) }
    rescue LoadError
      require 'yajl'
      Proc.new { |tag, time, record| Yajl::Encoder.encode(record) }
    end
  when 'ltsv'
    require 'ltsv'
    Proc.new { |tag, time, record| LTSV.dump(record) }
  else
    @formatter = formatter_create(usage: 'kafka-plugin', conf: conf)
    @formatter.method(:format)
  end
end
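
To illustrate: for @type json (and ltsv) the returned proc serializes only the record, ignoring tag and time; any other @type falls through to Fluentd's formatter plugins via formatter_create. A minimal sketch of the Yajl fallback branch:

# Sketch of the Yajl fallback: tag and time are accepted but unused.
require 'yajl'
formatter_proc = Proc.new { |tag, time, record| Yajl::Encoder.encode(record) }

formatter_proc.call('app.logs', Time.now.to_i, { 'message' => 'hello' })
# => '{"message":"hello"}'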

#shutdown ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 245

def shutdown
  super
  wait_writing_threads
  shutdown_producers
end

#shutdown_producers ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 251

def shutdown_producers
  if @share_producer
    @shared_producer.shutdown
    @shared_producer = nil
  else
    @producers_mutex.synchronize {
      shutdown_threads = @producers.map { |key, producer|
        th = Thread.new {
          producer.shutdown
        }
        th.abort_on_exception = true
        th
      }
      shutdown_threads.each { |th| th.join }
      @producers = {}
    }
  end
end

#start ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 213

def start
  super
  refresh_client

  if @share_producer
    @shared_producer = create_producer
  else
    @producers = {}
    @producers_mutex = Mutex.new
  end
end

#terminate ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 230

def terminate
  super
  @kafka = nil
end

#wait_writing_threads ⇒ Object



# File 'lib/fluent/plugin/out_kafka2.rb', line 235

def wait_writing_threads
  done = false
  until done do
    @writing_threads_mutex.synchronize do
      done = true if @writing_threads.empty?
    end
    sleep(1) unless done
  end
end

#write(chunk) ⇒ Object

TODO: optimize write performance



# File 'lib/fluent/plugin/out_kafka2.rb', line 307

def write(chunk)
  @writing_threads_mutex.synchronize { @writing_threads.add(Thread.current) }

  tag = chunk.metadata.tag
  topic = if @topic
            extract_placeholders(@topic, chunk)
          else
            (chunk.metadata.variables && chunk.metadata.variables[@topic_key_sym]) || @default_topic || tag
          end

  messages = 0

  base_headers = @headers
  mutate_headers = !@headers_from_record_accessors.empty?

  begin
    producer = get_producer
    chunk.msgpack_each { |time, record|
      begin
        record = inject_values_to_record(tag, time, record)
        record.delete(@topic_key) if @exclude_topic_key
        partition_key = (@exclude_partition_key ? record.delete(@partition_key_key) : record[@partition_key_key]) || @default_partition_key
        partition = (@exclude_partition ? record.delete(@partition_key) : record[@partition_key]) || @default_partition
        message_key = (@exclude_message_key ? record.delete(@message_key_key) : record[@message_key_key]) || @default_message_key

        if mutate_headers
          headers = base_headers.clone
          @headers_from_record_accessors.each do |key, header_accessor|
            headers[key] = header_accessor.call(record)
          end
        else
          headers = base_headers
        end

        unless @exclude_fields.empty?
          @exclude_field_accessors.each do |exclude_field_accessor|
            exclude_field_accessor.delete(record)
          end
        end

        record = @record_field_accessor.call(record) unless @record_field_accessor.nil?
        record_buf = @formatter_proc.call(tag, time, record)
        record_buf_bytes = record_buf.bytesize
        if @max_send_limit_bytes && record_buf_bytes > @max_send_limit_bytes
          log.warn "record size exceeds max_send_limit_bytes. Skip event:", :time => time, :record_size => record_buf_bytes
          log.debug "Skipped event:", :record => record
          next
        end
      rescue StandardError => e
        log.warn "unexpected error during format record. Skip broken event:", :error => e.to_s, :error_class => e.class.to_s, :time => time, :record => record
        next
      end

      log.trace { "message will be sent to #{topic} with partition_key: #{partition_key}, partition: #{partition}, message_key: #{message_key} and value: #{record_buf}." }
      messages += 1

      producer.produce(record_buf, key: message_key, partition_key: partition_key, partition: partition, headers: headers,
                       create_time: @use_event_time ? Time.at(time) : Time.now, topic: topic)
    }

    if messages > 0
      log.debug { "#{messages} messages sent." }
      if @discard_kafka_delivery_failed
        begin
          producer.deliver_messages
        rescue Kafka::DeliveryFailed => e
          log.warn "DeliveryFailed occurred. Discard broken event:", :error => e.to_s, :error_class => e.class.to_s, :tag => tag
          producer.clear_buffer
        end
      else
        producer.deliver_messages
      end
    end
  rescue Kafka::UnknownTopicOrPartition
    if @use_default_for_unknown_topic && topic != @default_topic
      log.warn "'#{topic}' topic not found. Retry with '#{default_topic}' topic"
      topic = @default_topic
      retry
    end
    raise
  end
rescue Exception => e
  ignore = @ignore_exceptions.include?(e.class.name)

  log.warn "Send exception occurred: #{e}"
  log.warn "Exception Backtrace : #{e.backtrace.join("\n")}"
  log.warn "Exception ignored in tag : #{tag}" if ignore
  # For safety, refresh client and its producers
  refresh_client(false)
  # Raise UnrecoverableError so the ignored-exception chunk is backed up
  raise Fluent::UnrecoverableError if ignore && exception_backup
  # Raise exception to retry sending messages
  raise e unless ignore
ensure
  @writing_threads_mutex.synchronize { @writing_threads.delete(Thread.current) }
end