Class: StatsD::Instrument::Aggregator

Inherits:
Object
Defined in:
lib/statsd/instrument/aggregator.rb

Constant Summary collapse

CONST_SAMPLE_RATE =
1.0
COUNT =
:c
DISTRIBUTION =
:d
MEASURE =
:ms
HISTOGRAM =
:h
GAUGE =
:g

Class Method Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(sink, datagram_builder_class, prefix, default_tags, flush_interval: 5.0, max_values: 100) ⇒ Aggregator

Returns a new instance of Aggregator.

Parameters:

  • sink (#<<)

    The sink to write the aggregated metrics to.

  • datagram_builder_class (Class)

    The class to use for building datagrams.

  • prefix (String)

    The prefix to add to all metrics.

  • default_tags (Array<String>)

    The tags to add to all metrics.

  • flush_interval (Float) (defaults to: 5.0)

    The interval at which to flush the aggregated metrics.

  • max_values (Integer) (defaults to: 100)

    The maximum number of values to aggregate before flushing.



81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
# File 'lib/statsd/instrument/aggregator.rb', line 81

# Builds an aggregator that buffers metrics and flushes them periodically
# from a background thread, or early when a key accumulates too many values.
#
# @param sink [#<<] The sink to write the aggregated metrics to.
# @param datagram_builder_class [Class] The class to use for building datagrams.
# @param prefix [String] The prefix to add to all metrics.
# @param default_tags [Array<String>] The tags to add to all metrics.
# @param flush_interval [Float] The interval (seconds) at which to flush the
#   aggregated metrics.
# @param max_values [Integer] The maximum number of values to aggregate per
#   key before flushing early.
def initialize(sink, datagram_builder_class, prefix, default_tags, flush_interval: 5.0, max_values: 100)
  @sink = sink
  @datagram_builder_class = datagram_builder_class
  @metric_prefix = prefix
  @default_tags = default_tags
  # Builders are cached per no_prefix flag and looked up with the boolean
  # value itself (datagram_builders[no_prefix]). Boolean keys are required:
  # the previous literal `{ true: nil, false: nil }` created the Symbol keys
  # :true/:false, which no boolean lookup could ever match.
  @datagram_builders = {
    true => nil,
    false => nil,
  }
  @max_values = max_values

  # Mutex protects the aggregation_state and flush_thread from concurrent access
  @mutex = Mutex.new
  @aggregation_state = {}

  @pid = Process.pid
  @flush_interval = flush_interval
  @flush_thread = Thread.new do
    Thread.current.abort_on_exception = true
    loop do
      sleep(@flush_interval)
      thread_healthcheck
      flush
    rescue => e
      StatsD.logger.error { "[#{self.class.name}] Error in flush thread: #{e}" }
      raise e
    end
  end

  # The finalizer drains any metrics still buffered at GC time. State is
  # passed in explicitly so the finalizer proc does not capture (and pin)
  # this instance, which would prevent it from ever being collected.
  ObjectSpace.define_finalizer(
    self,
    self.class.finalize(@aggregation_state, @sink, @datagram_builders, @datagram_builder_class, @default_tags),
  )
end

Class Method Details

.finalize(aggregation_state, sink, datagram_builders, datagram_builder_class, default_tags) ⇒ Object



35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
# File 'lib/statsd/instrument/aggregator.rb', line 35

# Builds the finalizer proc that drains any remaining aggregated metrics
# when an aggregator instance is garbage-collected. All state is passed in
# explicitly so the proc does not capture the instance being finalized.
#
# @param aggregation_state [Hash] pending metrics, keyed by packet key
#   (responds to #name, #tags, #no_prefix, #type).
# @param sink [#<<] the sink serialized datagrams are written to.
# @param datagram_builders [Hash{Boolean => Object, nil}] builder cache,
#   keyed by the no_prefix flag.
# @param datagram_builder_class [Class] class used to build datagrams.
# @param default_tags [Array<String>] tags added to every metric.
# @param prefix [String, nil] prefix applied to non-no_prefix metrics.
#   Defaults to nil for backward compatibility: the previous implementation
#   read @metric_prefix, which inside this class-method scope is a
#   class-level ivar and therefore always nil.
# @return [Proc] suitable for ObjectSpace.define_finalizer.
def finalize(aggregation_state, sink, datagram_builders, datagram_builder_class, default_tags, prefix: nil)
  proc do
    aggregation_state.each do |key, agg_value|
      no_prefix = key.no_prefix
      datagram_builders[no_prefix] ||= datagram_builder_class.new(
        prefix: no_prefix ? nil : prefix,
        default_tags: default_tags,
      )
      case key.type
      when COUNT
        sink << datagram_builders[no_prefix].c(
          key.name,
          agg_value,
          CONST_SAMPLE_RATE,
          key.tags,
        )
      when DISTRIBUTION, MEASURE, HISTOGRAM
        sink << datagram_builders[no_prefix].timing_value_packed(
          key.name,
          key.type.to_s,
          agg_value,
          CONST_SAMPLE_RATE,
          key.tags,
        )
      when GAUGE
        sink << datagram_builders[no_prefix].g(
          key.name,
          agg_value,
          CONST_SAMPLE_RATE,
          key.tags,
        )
      else
        # NOTE(review): inside this proc `self` is the defining class, so
        # `self.class.name` logs "Class" rather than the aggregator's name —
        # kept as-is to preserve the emitted log message; confirm intent.
        StatsD.logger.error { "[#{self.class.name}] Unknown aggregation type: #{key.type}" }
      end
    end
    # Drop everything that was just written so a re-run is a no-op.
    aggregation_state.clear
  end
end

Instance Method Details

#aggregate_timing(name, value, tags: [], no_prefix: false, type: DISTRIBUTION) ⇒ Object



137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
# File 'lib/statsd/instrument/aggregator.rb', line 137

# Buffers a timing-family value (distribution / measure / histogram) under
# its packet key for later packed emission. If the flush thread cannot be
# (re)started, the single value is written straight to the sink instead.
def aggregate_timing(name, value, tags: [], no_prefix: false, type: DISTRIBUTION)
  unless thread_healthcheck
    # Aggregation unavailable: emit this one value immediately, unaggregated.
    sink << datagram_builder(no_prefix: no_prefix).timing_value_packed(
      name, type, [value], CONST_SAMPLE_RATE, tags
    )
    return
  end

  normalized_tags = tags_sorted(tags)
  state_key = packet_key(name, normalized_tags, no_prefix, type)

  @mutex.synchronize do
    bucket = (@aggregation_state[state_key] ||= [])
    # Flush early when this key is about to reach the configured cap.
    # NOTE(review): if do_flush clears @aggregation_state, the append below
    # lands in an array no longer referenced by the state hash — confirm
    # do_flush's contract before relying on that value being emitted.
    do_flush if bucket.size + 1 >= @max_values
    bucket << value
  end
end

#flushObject



171
172
173
# File 'lib/statsd/instrument/aggregator.rb', line 171

# Drains all buffered metrics to the sink, holding the mutex so no
# concurrent recorder mutates the aggregation state mid-flush.
def flush
  @mutex.synchronize do
    do_flush
  end
end

#gauge(name, value, tags: [], no_prefix: false) ⇒ Object



157
158
159
160
161
162
163
164
165
166
167
168
169
# File 'lib/statsd/instrument/aggregator.rb', line 157

# Records a gauge reading. Gauges are last-write-wins: only the most recent
# value per packet key survives until the next flush. Falls back to an
# immediate sink write when the flush thread cannot be (re)started.
def gauge(name, value, tags: [], no_prefix: false)
  unless thread_healthcheck
    sink << datagram_builder(no_prefix: no_prefix).g(name, value, CONST_SAMPLE_RATE, tags)
    return
  end

  normalized_tags = tags_sorted(tags)
  state_key = packet_key(name, normalized_tags, no_prefix, GAUGE)

  # Overwrite (not accumulate) under the lock.
  @mutex.synchronize { @aggregation_state[state_key] = value }
end

#increment(name, value = 1, tags: [], no_prefix: false) ⇒ void

This method returns an undefined value.

Increment a counter by a given value and save it for later flushing.

Parameters:

  • name (String)

    The name of the counter.

  • value (Integer) (defaults to: 1)

    The value to increment the counter by.

  • tags (Hash{String, Symbol => String}, Array<String>) (defaults to: [])

    The tags to attach to the counter.

  • no_prefix (Boolean) (defaults to: false)

    If true, the metric will not be prefixed.



122
123
124
125
126
127
128
129
130
131
132
133
134
135
# File 'lib/statsd/instrument/aggregator.rb', line 122

# Increment a counter by a given value and save it for later flushing.
# Falls back to writing a single counter datagram straight to the sink
# when the flush thread cannot be (re)started.
#
# @param name [String] The name of the counter.
# @param value [Integer] The value to increment the counter by.
# @param tags [Hash{String, Symbol => String}, Array<String>] The tags to
#   attach to the counter.
# @param no_prefix [Boolean] If true, the metric will not be prefixed.
# @return [void]
def increment(name, value = 1, tags: [], no_prefix: false)
  unless thread_healthcheck
    sink << datagram_builder(no_prefix: no_prefix).c(name, value, CONST_SAMPLE_RATE, tags)
    return
  end

  normalized_tags = tags_sorted(tags)
  state_key = packet_key(name, normalized_tags, no_prefix, COUNT)

  # Accumulate under the lock; missing keys start from zero.
  @mutex.synchronize do
    @aggregation_state[state_key] = (@aggregation_state[state_key] || 0) + value
  end
end