Class: Fluent::KinesisStreamsAggregatedOutput

Inherits:
KinesisOutput < Object
Includes:
Fluent::KinesisHelper::API::BatchRequest, Fluent::KinesisHelper::Aggregator::Mixin
Defined in:
lib/fluent/plugin/out_kinesis_streams_aggregated.rb
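
Overview

This output plugin buffers events and packs each flushed batch into a single aggregated Kinesis record (the aggregated record format used by the Kinesis Producer Library), so many small events share one partition key and one PutRecords entry. (Summary inferred from the methods shown below; see #write and #offset.)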

Constant Summary

RequestType = :streams_aggregated
BatchRequestLimitCount = 100_000
BatchRequestLimitSize = 1024 * 1024
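
Because #write collapses a whole batch into one aggregated record, BatchRequestLimitSize corresponds to the 1 MiB maximum size of a single Kinesis record (rather than the 5 MiB PutRecords request limit), and BatchRequestLimitCount caps how many source records may be packed into that one aggregated record.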

Constants included from Fluent::KinesisHelper::Aggregator::Mixin

Fluent::KinesisHelper::Aggregator::Mixin::AggregateOffset, Fluent::KinesisHelper::Aggregator::Mixin::RecordOffset

Constants included from Fluent::KinesisHelper::API

Fluent::KinesisHelper::API::MaxRecordSize

Instance Method Summary

Methods included from Fluent::KinesisHelper::API::BatchRequest

included

Methods included from Fluent::KinesisHelper::Aggregator::Mixin

#aggregator, included

Methods inherited from KinesisOutput

fluentd_v0_12?, #multi_workers_ready?

Methods included from Fluent::KinesisHelper::API

included

Methods included from Fluent::KinesisHelper::Client

#client, included

Instance Method Details

#configure(conf) ⇒ Object



# File 'lib/fluent/plugin/out_kinesis_streams_aggregated.rb', line 31

def configure(conf)
  super
  @partition_key_generator = create_partition_key_generator
  @batch_request_max_size -= offset
  @max_record_size -= offset
end
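
configure shrinks both the batch size limit and the per-record limit by #offset so that the aggregated payload plus its partition key still fits inside one Kinesis record. A minimal sketch of that size budget with hypothetical values (the real numbers come from AggregateOffset and the generated partition key):

max_record_size  = 1024 * 1024   # 1 MiB Kinesis record limit
aggregate_offset = 25            # hypothetical aggregation overhead, in bytes
key_size         = 32            # hypothetical partition key length
offset           = aggregate_offset + key_size * 2   # mirrors #offset below
effective_size   = max_record_size - offset          # room left for aggregated data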

#format(tag, time, record) ⇒ Object



# File 'lib/fluent/plugin/out_kinesis_streams_aggregated.rb', line 38

def format(tag, time, record)
  format_for_api do
    [@data_formatter.call(tag, time, record)]
  end
end
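
Each event is serialized by the configured @data_formatter and wrapped in a one-element array; format_for_api, provided by the Fluent::KinesisHelper::API mixin (not shown on this page), wraps the call with the shared bookkeeping for formatted records, such as size validation against the limit adjusted in #configure.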

#offset ⇒ Object



# File 'lib/fluent/plugin/out_kinesis_streams_aggregated.rb', line 58

def offset
  @offset ||= AggregateOffset + @partition_key_generator.call.size*2
end
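
The value is memoized: the aggregation overhead constant plus twice the length of one generated partition key. With the same hypothetical numbers as above (25-byte overhead, 32-character key), offset would be 25 + 32 * 2 = 89 bytes reserved out of each record.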

#write(chunk) ⇒ Object



# File 'lib/fluent/plugin/out_kinesis_streams_aggregated.rb', line 44

def write(chunk)
  write_records_batch(chunk) do |batch|
    key = @partition_key_generator.call
    records = batch.map{|(data)|data}
    client.put_records(
      stream_name: @stream_name,
      records: [{
        partition_key: key,
        data: aggregator.aggregate(records, key),
      }],
    )
  end
end
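
Each batch produced by write_records_batch is aggregated into one blob under a single freshly generated partition key and shipped as a one-record PutRecords call. A minimal sketch (not the plugin itself) of the equivalent call against the plain aws-sdk-kinesis client, with hypothetical region, stream name, and payload:

require 'aws-sdk-kinesis'
require 'securerandom'

client = Aws::Kinesis::Client.new(region: 'us-east-1')   # hypothetical region
partition_key = SecureRandom.hex(16)                      # hypothetical key format
aggregated_blob = 'stand-in for aggregator.aggregate(records, key)'

client.put_records(
  stream_name: 'my-stream',                               # hypothetical stream name
  records: [{ partition_key: partition_key, data: aggregated_blob }]
)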