Class: Fluent::DatadogOutput

Inherits:
  Plugin::Output
    • Object

Defined in:
  lib/fluent/plugin/out_datadog.rb

Defined Under Namespace

Classes: DatadogClient, DatadogHTTPClient, DatadogTCPClient, RetryableError

Constant Summary

# Maximum limits for transport, applied regardless of the Fluentd buffer;
# see docs.datadoghq.com/api/?lang=bash#logs
DD_MAX_BATCH_LENGTH = 500
DD_MAX_BATCH_SIZE = 5000000
DD_TRUNCATION_SUFFIX = "...TRUNCATED..."
DD_DEFAULT_HTTP_ENDPOINT = "http-intake.logs.datadoghq.com"
DD_DEFAULT_TCP_ENDPOINT = "intake.logs.datadoghq.com"
DEFAULT_BUFFER_TYPE = "memory"

Instance Method Summary

Constructor Details

#initialize ⇒ DatadogOutput

Returns a new instance of DatadogOutput.



# File 'lib/fluent/plugin/out_datadog.rb', line 63

def initialize
  super
end

Instance Method Details

#batch_http_events(encoded_events, max_batch_length, max_request_size) ⇒ Object

Group HTTP events in batches



# File 'lib/fluent/plugin/out_datadog.rb', line 161

def batch_http_events(encoded_events, max_batch_length, max_request_size)
  batches = []
  current_batch = []
  current_batch_size = 0
  encoded_events.each_with_index do |encoded_event, i|
    current_event_size = encoded_event.bytesize
    # If a single log is bigger than the max request size, truncate it
    if current_event_size > max_request_size
      encoded_event = truncate(encoded_event, max_request_size)
      current_event_size = encoded_event.bytesize
    end

    if (i > 0 and i % max_batch_length == 0) or (current_batch_size + current_event_size > max_request_size)
      batches << current_batch
      current_batch = []
      current_batch_size = 0
    end

    current_batch_size += encoded_event.bytesize
    current_batch << encoded_event
  end
  batches << current_batch
  batches
end
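
A minimal usage sketch of the batching rule (illustrative, not from the source; `output` is assumed to be a configured Fluent::DatadogOutput instance):

# Hypothetical sketch: five small events with a max batch length of 2.
events = 5.times.map { |i| %({"message":"event #{i}"}) }
batches = output.batch_http_events(events, 2, Fluent::DatadogOutput::DD_MAX_BATCH_SIZE)
batches.map(&:length) # => [2, 2, 1]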

#configure(conf) ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 67

def configure(conf)
  compat_parameters_convert(conf, :buffer)
  super
  return if @dd_hostname

  if not @use_http and @host == DD_DEFAULT_HTTP_ENDPOINT
    @host = DD_DEFAULT_TCP_ENDPOINT
  end

  # Set dd_hostname if not already set (can be set when using fluentd as aggregator)
  @dd_hostname = %x[hostname -f 2> /dev/null].strip
  @dd_hostname = Socket.gethostname if @dd_hostname.empty?
end

#enrich_record(tag, time, record) ⇒ Object

Enrich records with metadata such as service, tags, or source



# File 'lib/fluent/plugin/out_datadog.rb', line 206

def enrich_record(tag, time, record)
  if @dd_sourcecategory
    record["ddsourcecategory"] ||= @dd_sourcecategory
  end
  if @dd_source
    record["ddsource"] ||= @dd_source
  end
  if @dd_tags
    record["ddtags"] ||= @dd_tags
  end
  if @service
    record["service"] ||= @service
  end
  if @dd_hostname
    # Set the record hostname to the configured dd_hostname only if the
    # record hostname is empty, ensuring a hostname is set even when the
    # record doesn't contain one.
    record["hostname"] ||= @dd_hostname
  end

  if @include_tag_key
    record[@tag_key] = tag
  end
  # If @timestamp_key already exists, we don't overwrite it.
  if @timestamp_key and record[@timestamp_key].nil? and time
    record[@timestamp_key] = Time.at(time).utc.iso8601(3)
  end

  container_tags = get_container_tags(record)
  unless container_tags.empty?
    if record["ddtags"].nil? || record["ddtags"].empty?
      record["ddtags"] = container_tags
    else
      record["ddtags"] = record["ddtags"] + "," + container_tags
    end
  end
  record
end
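
A minimal sketch of the enrichment (illustrative; assumes `output` is a configured instance with dd_source "nginx" and service "web"):

# Hypothetical sketch; only keys this method sets are shown.
record = output.enrich_record("web.access", Time.now.to_f, { "message" => "GET / 200" })
record["ddsource"] # => "nginx"
record["service"]  # => "web"
record["hostname"] # => the dd_hostname resolved in #configure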

#format(tag, time, record) ⇒ Object

This method is called when an event reaches Fluentd.



# File 'lib/fluent/plugin/out_datadog.rb', line 104

def format(tag, time, record)
  # When Fluent::EventTime is msgpack'ed it gets converted to int with seconds
  # precision only. We explicitly convert it to floating point number, which
  # is compatible with Time.at below.
  record = enrich_record(tag, time.to_f, record)
  if @use_http
    record = Yajl.dump(record)
  else
    if @use_json
      record = "#{api_key} #{Yajl.dump(record)}"
    else
      record = "#{api_key} #{record}"
    end
  end
  [record].to_msgpack
end
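
The return value is a msgpack-encoded one-element array; a sketch, assuming `output` is configured for HTTP transport:

# Hypothetical sketch: with use_http, the enriched record is JSON-encoded by
# Yajl, wrapped in a one-element array, and msgpack'ed for buffering.
require 'msgpack'
packed = output.format("web.access", Fluent::EventTime.now, { "message" => "hello" })
MessagePack.unpack(packed) # => one-element array holding the enriched JSON string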

#format_http_event_batch(events) ⇒ Object

Format a batch of HTTP events



# File 'lib/fluent/plugin/out_datadog.rb', line 201

def format_http_event_batch(events)
  "[#{events.join(',')}]"
end
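
The result is a plain JSON array wrapper around the already-encoded events; for example:

output.format_http_event_batch(['{"a":1}', '{"b":2}']) # => '[{"a":1},{"b":2}]'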

#formatted_to_msgpack_binary? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/fluent/plugin/out_datadog.rb', line 85

def formatted_to_msgpack_binary?
  true
end

#get_container_tags(record) ⇒ Object

Collect Docker and Kubernetes tags for your logs using the filter_kubernetes_metadata plugin. For more information about the attribute names, see github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter/blob/master/lib/fluent/plugin/filter_kubernetes_metadata.rb#L265



# File 'lib/fluent/plugin/out_datadog.rb', line 380

def get_container_tags(record)
  [
      get_kubernetes_tags(record),
      get_docker_tags(record)
  ].compact.join(",")
end
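
A sketch with a record shaped like the output of the metadata filter (illustrative; the keys follow #get_kubernetes_tags and #get_docker_tags below):

# Hypothetical sketch; `output` is a configured plugin instance.
record = {
  "kubernetes" => { "container_name" => "app", "namespace_name" => "prod", "pod_name" => "app-1" },
  "docker"     => { "container_id" => "abc123" }
}
output.get_container_tags(record)
# => "container_name:app,kube_namespace:prod,pod_name:app-1,container_id:abc123"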

#get_docker_tags(record) ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 400

def get_docker_tags(record)
  if record.key?('docker') and not record.fetch('docker').nil?
    docker = record['docker']
    tags = Array.new
    tags.push("container_id:" + docker['container_id']) unless docker['container_id'].nil?
    return tags.join(",")
  end
  nil
end

#get_kubernetes_tags(record) ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 387

def get_kubernetes_tags(record)
  if record.key?('kubernetes') and not record.fetch('kubernetes').nil?
    kubernetes = record['kubernetes']
    tags = Array.new
    tags.push("image_name:" + kubernetes['container_image']) unless kubernetes['container_image'].nil?
    tags.push("container_name:" + kubernetes['container_name']) unless kubernetes['container_name'].nil?
    tags.push("kube_namespace:" + kubernetes['namespace_name']) unless kubernetes['namespace_name'].nil?
    tags.push("pod_name:" + kubernetes['pod_name']) unless kubernetes['pod_name'].nil?
    return tags.join(",")
  end
  nil
end

#gzip_compress(payload, compression_level) ⇒ Object

Compress logs with GZIP



# File 'lib/fluent/plugin/out_datadog.rb', line 246

def gzip_compress(payload, compression_level)
  gz = StringIO.new
  gz.set_encoding("BINARY")
  z = Zlib::GzipWriter.new(gz, compression_level)
  begin
    z.write(payload)
  ensure
    z.close
  end
  gz.string
end
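
The output is a standard gzip stream; a round-trip sketch (illustrative):

# Hypothetical sketch: the result can be read back with Zlib::GzipReader.
require 'zlib'
require 'stringio'
payload    = '[{"message":"hello"}]'
compressed = output.gzip_compress(payload, Zlib::DEFAULT_COMPRESSION)
Zlib::GzipReader.new(StringIO.new(compressed)).read == payload # => true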

#max(a, b) ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 196

def max(a, b)
  a > b ? a : b
end

#multi_workers_ready? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/fluent/plugin/out_datadog.rb', line 81

def multi_workers_ready?
  true
end

#new_client(logger, api_key, use_http, use_ssl, no_ssl_validation, host, ssl_port, port, use_compression) ⇒ Object

Build a new transport client



# File 'lib/fluent/plugin/out_datadog.rb', line 259

def new_client(logger, api_key, use_http, use_ssl, no_ssl_validation, host, ssl_port, port, use_compression)
  if use_http
    DatadogHTTPClient.new logger, use_ssl, no_ssl_validation, host, ssl_port, port, use_compression, api_key
  else
    DatadogTCPClient.new logger, use_ssl, no_ssl_validation, host, ssl_port, port
  end
end
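
This mirrors what #start does; a sketch with assumed parameter values ("deadbeef" is a placeholder API key):

# Hypothetical sketch mirroring #start.
client = output.new_client(output.log, "deadbeef", true, true, false,
                           Fluent::DatadogOutput::DD_DEFAULT_HTTP_ENDPOINT,
                           443, 80, true)
client.class # => Fluent::DatadogOutput::DatadogHTTPClient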

#process_http_events(events, use_compression, compression_level, max_retries, max_backoff, max_batch_length, max_batch_size) ⇒ Object

Process and send a set of HTTP events, potentially breaking the set down into smaller batches



# File 'lib/fluent/plugin/out_datadog.rb', line 141

def process_http_events(events, use_compression, compression_level, max_retries, max_backoff, max_batch_length, max_batch_size)
  batches = batch_http_events(events, max_batch_length, max_batch_size)
  batches.each do |batched_event|
    formatted_events = format_http_event_batch(batched_event)
    if use_compression
      formatted_events = gzip_compress(formatted_events, compression_level)
    end
    @client.send_retries(formatted_events, max_retries, max_backoff)
  end
end

#process_tcp_event(event, max_retries, max_backoff, max_batch_size) ⇒ Object

Process and send a single TCP event



# File 'lib/fluent/plugin/out_datadog.rb', line 153

def process_tcp_event(event, max_retries, max_backoff, max_batch_size)
  if event.bytesize > max_batch_size
    event = truncate(event, max_batch_size)
  end
  @client.send_retries(event, max_retries, max_backoff)
end

#shutdown ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 94

def shutdown
  super
end

#start ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 89

def start
  super
  @client = new_client(log, @api_key, @use_http, @use_ssl, @no_ssl_validation, @host, @ssl_port, @port, @use_compression)
end

#terminate ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 98

def terminate
  super
  @client.close if @client
end

#truncate(event, max_length) ⇒ Object

Truncate events over the provided max length, appending a marker when truncated



# File 'lib/fluent/plugin/out_datadog.rb', line 187

def truncate(event, max_length)
  if event.length > max_length
    event = event[0..max_length - 1]
    event[max(0, max_length - DD_TRUNCATION_SUFFIX.length)..max_length - 1] = DD_TRUNCATION_SUFFIX
    return event
  end
  event
end
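
A sketch of the truncation behavior (illustrative):

# Hypothetical sketch: the result keeps max_length characters, the last of
# which are replaced by DD_TRUNCATION_SUFFIX.
long_event = "x" * 100
truncated  = output.truncate(long_event, 50)
truncated.length                       # => 50
truncated.end_with?("...TRUNCATED...") # => true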

#write(chunk) ⇒ Object

NOTE: this method is called by an internal thread, not Fluentd's main thread. `chunk` is a buffer chunk that includes multiple formatted events.



# File 'lib/fluent/plugin/out_datadog.rb', line 124

def write(chunk)
  if @use_http
    events = Array.new
    chunk.msgpack_each do |record|
      next if record.empty?
      events.push record[0]
    end
    process_http_events(events, @use_compression, @compression_level, @max_retries, @max_backoff, DD_MAX_BATCH_LENGTH, DD_MAX_BATCH_SIZE)
  else
    chunk.msgpack_each do |record|
      next if record.empty?
      process_tcp_event(record[0], @max_retries, @max_backoff, DD_MAX_BATCH_SIZE)
    end
  end
end