Class: Fluent::DatadogOutput

Inherits: Plugin::Output
  (Object → Plugin::Output → Fluent::DatadogOutput)

Defined in: lib/fluent/plugin/out_datadog.rb
Defined Under Namespace
Classes: DatadogClient, DatadogHTTPClient, DatadogTCPClient, RetryableError
Constant Summary

- DD_MAX_BATCH_LENGTH = 500
  Max limits for transport regardless of the Fluentd buffer, respecting docs.datadoghq.com/api/?lang=bash#logs.
- DD_MAX_BATCH_SIZE = 5000000
- DD_TRUNCATION_SUFFIX = "...TRUNCATED..."
- DD_DEFAULT_HTTP_ENDPOINT = "http-intake.logs.datadoghq.com"
- DD_DEFAULT_TCP_ENDPOINT = "intake.logs.datadoghq.com"
- DEFAULT_BUFFER_TYPE = "memory"
Instance Method Summary

- #batch_http_events(encoded_events, max_batch_length, max_request_size) ⇒ Object
  Group HTTP events in batches.
- #configure(conf) ⇒ Object
- #enrich_record(tag, time, record) ⇒ Object
  Enrich records with metadata such as service, tags or source.
- #format(tag, time, record) ⇒ Object
  This method is called when an event reaches Fluentd.
- #format_http_event_batch(events) ⇒ Object
  Format a batch of HTTP events.
- #formatted_to_msgpack_binary? ⇒ Boolean
- #get_container_tags(record) ⇒ Object
  Collect Docker and Kubernetes tags for your logs using the `filter_kubernetes_metadata` plugin; for more information about the attribute names, check: github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter/blob/master/lib/fluent/plugin/filter_kubernetes_metadata.rb#L265.
- #get_docker_tags(record) ⇒ Object
- #get_kubernetes_tags(record) ⇒ Object
- #gzip_compress(payload, compression_level) ⇒ Object
  Compress logs with GZIP.
- #initialize ⇒ DatadogOutput (constructor)
  A new instance of DatadogOutput.
- #max(a, b) ⇒ Object
- #multi_workers_ready? ⇒ Boolean
- #new_client(logger, api_key, use_http, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, use_compression, force_v1_routes) ⇒ Object
  Build a new transport client.
- #process_http_events(events, use_compression, compression_level, max_retries, max_backoff, max_batch_length, max_batch_size) ⇒ Object
  Process and send a set of HTTP events.
- #process_tcp_event(event, max_retries, max_backoff, max_batch_size) ⇒ Object
  Process and send a single TCP event.
- #shutdown ⇒ Object
- #start ⇒ Object
- #terminate ⇒ Object
- #truncate(event, max_length) ⇒ Object
  Truncate events over the provided max length, appending a marker when truncated.
- #write(chunk) ⇒ Object
  NOTE: this method is called by an internal thread, not Fluentd's main thread.
Constructor Details
#initialize ⇒ DatadogOutput
Returns a new instance of DatadogOutput.
# File 'lib/fluent/plugin/out_datadog.rb', line 67

def initialize
  super
end
Instance Method Details
#batch_http_events(encoded_events, max_batch_length, max_request_size) ⇒ Object
Group HTTP events in batches
# File 'lib/fluent/plugin/out_datadog.rb', line 169

def batch_http_events(encoded_events, max_batch_length, max_request_size)
  batches = []
  current_batch = []
  current_batch_size = 0
  encoded_events.each_with_index do |encoded_event, i|
    current_event_size = encoded_event.bytesize
    # If this unique log size is bigger than the request size, truncate it
    if current_event_size > max_request_size
      encoded_event = truncate(encoded_event, max_request_size)
      current_event_size = encoded_event.bytesize
    end

    if (i > 0 and i % max_batch_length == 0) or (current_batch_size + current_event_size > max_request_size)
      batches << current_batch
      current_batch = []
      current_batch_size = 0
    end

    current_batch_size += encoded_event.bytesize
    current_batch << encoded_event
  end
  batches << current_batch
  batches
end
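To make the two flush conditions concrete, here is a worked sketch with hypothetical limits far below the real DD_MAX_BATCH_LENGTH and DD_MAX_BATCH_SIZE:

# Hypothetical limits: max_batch_length = 2 events, max_request_size = 10 bytes.
events = ["aaaa", "bbbb", "cccc"]
# i = 0: "aaaa" opens the first batch (4 bytes).
# i = 1: "bbbb" still fits (8 bytes <= 10).
# i = 2: i % max_batch_length == 0, so the first batch is flushed and
#        "cccc" opens the second one.
# batch_http_events(events, 2, 10)  # => [["aaaa", "bbbb"], ["cccc"]]
# Any single event over 10 bytes would first be shrunk by truncate.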
#configure(conf) ⇒ Object
# File 'lib/fluent/plugin/out_datadog.rb', line 71

def configure(conf)
  compat_parameters_convert(conf, :buffer)
  super
  return if @dd_hostname

  if not @use_http and @host == DD_DEFAULT_HTTP_ENDPOINT
    @host = DD_DEFAULT_TCP_ENDPOINT
  end

  # Set dd_hostname if not already set (can be set when using fluentd as aggregator)
  @dd_hostname = %x[hostname -f 2> /dev/null].strip
  @dd_hostname = Socket.gethostname if @dd_hostname.empty?
end
#enrich_record(tag, time, record) ⇒ Object
Enrich records with metadata such as service, tags or source
# File 'lib/fluent/plugin/out_datadog.rb', line 214

def enrich_record(tag, time, record)
  if @dd_sourcecategory
    record["ddsourcecategory"] ||= @dd_sourcecategory
  end
  if @dd_source
    record["ddsource"] ||= @dd_source
  end
  if @dd_tags
    record["ddtags"] ||= @dd_tags
  end
  if @service
    record["service"] ||= @service
  end
  if @dd_hostname
    # set the record hostname to the configured dd_hostname only
    # if the record hostname is empty, ensuring having a hostname set
    # even if the record doesn't contain any.
    record["hostname"] ||= @dd_hostname
  end

  if @include_tag_key
    record[@tag_key] = tag
  end
  # If @timestamp_key already exists, we don't overwrite it.
  if @timestamp_key and record[@timestamp_key].nil? and time
    record[@timestamp_key] = Time.at(time).utc.iso8601(3)
  end

  container_tags = get_container_tags(record)
  unless container_tags.empty?
    if record["ddtags"].nil? || record["ddtags"].empty?
      record["ddtags"] = container_tags
    else
      record["ddtags"] = record["ddtags"] + "," + container_tags
    end
  end
  record
end
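For illustration, a hedged sketch of the enrichment result, assuming hypothetical configuration values (dd_source "nginx", service "web", dd_tags "env:prod", timestamp_key "@timestamp") and no container metadata in the record:

input = { "message" => "GET / HTTP/1.1 200" }
# enrich_record("nginx.access", 1714561200.0, input) would then return roughly:
output = {
  "message"    => "GET / HTTP/1.1 200",
  "ddsource"   => "nginx",                     # from @dd_source, only if unset
  "service"    => "web",                       # from @service
  "ddtags"     => "env:prod",                  # from @dd_tags, plus container tags if any
  "hostname"   => "aggregator-01",             # from @dd_hostname
  "@timestamp" => "2024-05-01T11:00:00.000Z"   # Time.at(time).utc.iso8601(3)
}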
#format(tag, time, record) ⇒ Object
This method is called when an event reaches Fluentd.
# File 'lib/fluent/plugin/out_datadog.rb', line 108

def format(tag, time, record)
  # When Fluent::EventTime is msgpack'ed it gets converted to int with seconds
  # precision only. We explicitly convert it to floating point number, which
  # is compatible with Time.at below.
  record = enrich_record(tag, time.to_f, record)
  if @use_http
    record = Yajl.dump(record)
  else
    if @use_json
      record = "#{api_key} #{Yajl.dump(record)}"
    else
      record = "#{api_key} #{record}"
    end
  end
  [record].to_msgpack
end
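A hedged illustration of the two wire formats this produces (the API key value is fake):

# HTTP transport: the enriched record as a bare JSON object.
#   '{"message":"hello","ddsource":"nginx"}'
# TCP transport with use_json: the API key prefixes the JSON payload.
#   'abcdef0123456789abcdef0123456789 {"message":"hello","ddsource":"nginx"}'
# Either string is wrapped in a one-element array and msgpack'ed, which is
# why #write later reads record[0] out of each chunk entry.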
#format_http_event_batch(events) ⇒ Object
Format batch of http events
# File 'lib/fluent/plugin/out_datadog.rb', line 209

def format_http_event_batch(events)
  "[#{events.join(',')}]"
end
#formatted_to_msgpack_binary? ⇒ Boolean
# File 'lib/fluent/plugin/out_datadog.rb', line 89

def formatted_to_msgpack_binary?
  true
end
#get_container_tags(record) ⇒ Object
Collect Docker and Kubernetes tags for your logs using the `filter_kubernetes_metadata` plugin; for more information about the attribute names, check: github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter/blob/master/lib/fluent/plugin/filter_kubernetes_metadata.rb#L265

# File 'lib/fluent/plugin/out_datadog.rb', line 408

def get_container_tags(record)
  [
    get_kubernetes_tags(record),
    get_docker_tags(record)
  ].compact.join(",")
end
#get_docker_tags(record) ⇒ Object
# File 'lib/fluent/plugin/out_datadog.rb', line 428

def get_docker_tags(record)
  if record.key?('docker') and not record.fetch('docker').nil?
    docker = record['docker']
    tags = Array.new
    tags.push("container_id:" + docker['container_id']) unless docker['container_id'].nil?
    return tags.join(",")
  end
  nil
end
#get_kubernetes_tags(record) ⇒ Object
# File 'lib/fluent/plugin/out_datadog.rb', line 415

def get_kubernetes_tags(record)
  if record.key?('kubernetes') and not record.fetch('kubernetes').nil?
    kubernetes = record['kubernetes']
    tags = Array.new
    tags.push("image_name:" + kubernetes['container_image']) unless kubernetes['container_image'].nil?
    tags.push("container_name:" + kubernetes['container_name']) unless kubernetes['container_name'].nil?
    tags.push("kube_namespace:" + kubernetes['namespace_name']) unless kubernetes['namespace_name'].nil?
    tags.push("pod_name:" + kubernetes['pod_name']) unless kubernetes['pod_name'].nil?
    return tags.join(",")
  end
  nil
end
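A sketch of the extraction, with hypothetical field values in the shape produced by fluent-plugin-kubernetes_metadata_filter:

record = {
  "kubernetes" => {
    "container_image" => "nginx:1.25",
    "container_name"  => "nginx",
    "namespace_name"  => "default",
    "pod_name"        => "nginx-6b7f5d8c9-x2x4q"
  },
  "docker" => { "container_id" => "de305d54" }
}
# get_kubernetes_tags(record)
#   => "image_name:nginx:1.25,container_name:nginx,kube_namespace:default,pod_name:nginx-6b7f5d8c9-x2x4q"
# get_docker_tags(record)
#   => "container_id:de305d54"
# get_container_tags(record) joins both lists with a comma.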
#gzip_compress(payload, compression_level) ⇒ Object
Compress logs with GZIP
# File 'lib/fluent/plugin/out_datadog.rb', line 254

def gzip_compress(payload, compression_level)
  gz = StringIO.new
  gz.set_encoding("BINARY")
  z = Zlib::GzipWriter.new(gz, compression_level)
  begin
    z.write(payload)
  ensure
    z.close
  end
  gz.string
end
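A standalone round-trip using the same Zlib calls, to show the payload decompresses back intact:

require 'zlib'
require 'stringio'

payload = '[{"message":"hello"}]'
gz = StringIO.new
gz.set_encoding("BINARY")
z = Zlib::GzipWriter.new(gz, Zlib::DEFAULT_COMPRESSION)
z.write(payload)
z.close
compressed = gz.string

# Decompressing recovers the original payload.
Zlib::GzipReader.new(StringIO.new(compressed)).read  # => '[{"message":"hello"}]'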
#max(a, b) ⇒ Object
# File 'lib/fluent/plugin/out_datadog.rb', line 204

def max(a, b)
  a > b ? a : b
end
#multi_workers_ready? ⇒ Boolean
# File 'lib/fluent/plugin/out_datadog.rb', line 85

def multi_workers_ready?
  true
end
#new_client(logger, api_key, use_http, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, use_compression, force_v1_routes) ⇒ Object
Build a new transport client
# File 'lib/fluent/plugin/out_datadog.rb', line 267

def new_client(logger, api_key, use_http, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, use_compression, force_v1_routes)
  if use_http
    DatadogHTTPClient.new logger, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, use_compression, api_key, force_v1_routes
  else
    DatadogTCPClient.new logger, use_ssl, no_ssl_validation, host, ssl_port, port
  end
end
#process_http_events(events, use_compression, compression_level, max_retries, max_backoff, max_batch_length, max_batch_size) ⇒ Object
Process and send a set of HTTP events, potentially breaking the set down into smaller batches.

# File 'lib/fluent/plugin/out_datadog.rb', line 149

def process_http_events(events, use_compression, compression_level, max_retries, max_backoff, max_batch_length, max_batch_size)
  batches = batch_http_events(events, max_batch_length, max_batch_size)
  batches.each do |batched_event|
    formatted_events = format_http_event_batch(batched_event)
    if use_compression
      formatted_events = gzip_compress(formatted_events, compression_level)
    end
    @client.send_retries(formatted_events, max_retries, max_backoff)
  end
end
#process_tcp_event(event, max_retries, max_backoff, max_batch_size) ⇒ Object
Process and send a single TCP event.

# File 'lib/fluent/plugin/out_datadog.rb', line 161

def process_tcp_event(event, max_retries, max_backoff, max_batch_size)
  if event.bytesize > max_batch_size
    event = truncate(event, max_batch_size)
  end
  @client.send_retries(event, max_retries, max_backoff)
end
#shutdown ⇒ Object
# File 'lib/fluent/plugin/out_datadog.rb', line 98

def shutdown
  super
end
#start ⇒ Object
# File 'lib/fluent/plugin/out_datadog.rb', line 93

def start
  super
  @client = new_client(log, @api_key, @use_http, @use_ssl, @no_ssl_validation, @host, @ssl_port, @port, @http_proxy, @use_compression, @force_v1_routes)
end
#terminate ⇒ Object
# File 'lib/fluent/plugin/out_datadog.rb', line 102

def terminate
  super
  @client.close if @client
end
#truncate(event, max_length) ⇒ Object
Truncate events over the provided max length, appending a marker when truncated
# File 'lib/fluent/plugin/out_datadog.rb', line 195

def truncate(event, max_length)
  if event.length > max_length
    event = event[0..max_length - 1]
    event[max(0, max_length - DD_TRUNCATION_SUFFIX.length)..max_length - 1] = DD_TRUNCATION_SUFFIX
    return event
  end
  event
end
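A standalone sketch of the same slice arithmetic, assuming a 100-character event and a hypothetical max_length of 50:

suffix = "...TRUNCATED..."   # DD_TRUNCATION_SUFFIX, 15 characters
event  = "a" * 100
truncated = event[0..49]                               # cut down to max_length
truncated[50 - suffix.length, suffix.length] = suffix  # overwrite the tail
truncated.length  # => 50, the last 15 characters being the marker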
#write(chunk) ⇒ Object
NOTE: this method is called by an internal thread, not Fluentd's main thread. 'chunk' is a buffer chunk that includes multiple formatted events.

# File 'lib/fluent/plugin/out_datadog.rb', line 128

def write(chunk)
  begin
    if @use_http
      events = Array.new
      chunk.msgpack_each do |record|
        next if record.empty?
        events.push record[0]
      end
      process_http_events(events, @use_compression, @compression_level, @max_retries, @max_backoff, DD_MAX_BATCH_LENGTH, DD_MAX_BATCH_SIZE)
    else
      chunk.msgpack_each do |record|
        next if record.empty?
        process_tcp_event(record[0], @max_retries, @max_backoff, DD_MAX_BATCH_SIZE)
      end
    end
  rescue Exception => e
    log.error("Uncaught processing exception in datadog forwarder #{e.message}")
  end
end