Class: LogStash::Agent

Inherits:
Object
Includes:
Util::Loggable
Defined in:
lib/logstash/agent.rb

Constant Summary

STARTED_AT =
Time.now.freeze

Instance Attribute Summary

Instance Method Summary

Methods included from Util::Loggable

included, #slow_logger

Constructor Details

#initialize(settings = LogStash::SETTINGS, source_loader = nil) ⇒ Agent

Creates a new LogStash::Agent.

Parameters:

  • settings (LogStash::Settings) (defaults to: LogStash::SETTINGS)

    the settings the agent is configured from; the constructor reads node.name, config.reload.automatic, config.reload.interval, metric.collect, and the http.* API settings from it

  • source_loader (LogStash::Config::SourceLoader) (defaults to: nil)

    optional configuration source loader; when nil, a SourceLoader with a Local source built from the settings is created (kept for backward compatibility in the tests)



# File 'lib/logstash/agent.rb', line 36

def initialize(settings = LogStash::SETTINGS, source_loader = nil)
  @logger = self.class.logger
  @settings = settings
  @auto_reload = setting("config.reload.automatic")
  @ephemeral_id = SecureRandom.uuid

  # Special bus object for inter-pipeline communication. Used by the `pipeline` input/output plugins
  @pipeline_bus = org.logstash.plugins.pipeline.PipelineBus.new

  @pipelines = java.util.concurrent.ConcurrentHashMap.new();

  @name = setting("node.name")
  @http_host = setting("http.host")
  @http_port = setting("http.port")
  @http_environment = setting("http.environment")
  # Generate / load the persistent uuid
  id

  # This is for backward compatibility in the tests
  if source_loader.nil?
    @source_loader = LogStash::Config::SourceLoader.new
    @source_loader.add_source(LogStash::Config::Source::Local.new(@settings))
  else
    @source_loader = source_loader
  end

  # Normalize the reload interval from nanoseconds to seconds
  @reload_interval = setting("config.reload.interval") / 1_000_000_000.0

  @collect_metric = setting("metric.collect")

  # Create the metric collectors and configure them with the library
  configure_metrics_collectors

  @state_resolver = LogStash::StateResolver.new(metric)

  @pipeline_reload_metric = metric.namespace([:stats, :pipelines])
  @instance_reload_metric = metric.namespace([:stats, :reloads])
  initialize_agent_metrics

  @dispatcher = LogStash::EventDispatcher.new(self)
  LogStash::PLUGIN_REGISTRY.hooks.register_emitter(self.class, dispatcher)
  dispatcher.fire(:after_initialize)

  @running = Concurrent::AtomicBoolean.new(false)
end
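
A minimal construction sketch (not part of the Logstash docs; it assumes a runtime where LogStash::SETTINGS and the agent class are already loaded). The setting names are the ones read in the constructor above:

settings = LogStash::SETTINGS.clone
settings.set("node.name", "example-node")        # reported via #name
settings.set("config.reload.automatic", false)   # disables the auto-reload loop

agent = LogStash::Agent.new(settings)            # builds a local SourceLoader by default
agent.name          # => "example-node"
agent.ephemeral_id  # => a fresh UUID generated in #initialize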

Instance Attribute Details

#dispatcher ⇒ Object (readonly)

Returns the value of attribute dispatcher.



# File 'lib/logstash/agent.rb', line 28

def dispatcher
  @dispatcher
end

#ephemeral_id ⇒ Object (readonly)

Returns the value of attribute ephemeral_id.



# File 'lib/logstash/agent.rb', line 28

def ephemeral_id
  @ephemeral_id
end

#logger ⇒ Object

Returns the value of attribute logger.



# File 'lib/logstash/agent.rb', line 29

def logger
  @logger
end

#metric ⇒ Object (readonly)

Returns the value of attribute metric.



# File 'lib/logstash/agent.rb', line 28

def metric
  @metric
end

#name ⇒ Object (readonly)

Returns the value of attribute name.



# File 'lib/logstash/agent.rb', line 28

def name
  @name
end

#pipeline_bus ⇒ Object (readonly)

Returns the value of attribute pipeline_bus.



# File 'lib/logstash/agent.rb', line 28

def pipeline_bus
  @pipeline_bus
end

#pipelines ⇒ Object (readonly)

Returns the value of attribute pipelines.



# File 'lib/logstash/agent.rb', line 28

def pipelines
  @pipelines
end

#settings ⇒ Object (readonly)

Returns the value of attribute settings.



# File 'lib/logstash/agent.rb', line 28

def settings
  @settings
end

#webserver ⇒ Object (readonly)

Returns the value of attribute webserver.



# File 'lib/logstash/agent.rb', line 28

def webserver
  @webserver
end

Instance Method Details

#auto_reload? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/logstash/agent.rb', line 125

def auto_reload?
  @auto_reload
end

#converge_state_and_update ⇒ Object



# File 'lib/logstash/agent.rb', line 137

def converge_state_and_update
  results = @source_loader.fetch

  unless results.success?
    if auto_reload?
      logger.debug("Could not fetch the configuration to converge, will retry", :message => results.error, :retrying_in => @reload_interval)
      return
    else
      raise "Could not fetch the configuration, message: #{results.error}"
    end
  end

  # We lock any access to the pipelines, since the actions will modify
  # their contents.
  converge_result = nil

  pipeline_actions = resolve_actions(results.response)
  converge_result = converge_state(pipeline_actions)
  update_metrics(converge_result)

  logger.info(
      "Pipelines running",
      :count => running_pipelines.size,
      :running_pipelines => running_pipelines.keys,
      :non_running_pipelines => non_running_pipelines.keys
  ) if converge_result.success? && converge_result.total > 0

  dispatch_events(converge_result)

  converge_result
rescue => e
  logger.error("An exception happened when converging configuration", :exception => e.class, :message => e.message, :backtrace => e.backtrace)
end
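
Because converge_state_and_update fetches the configuration from the source loader, resolves the pipeline actions, applies them, and updates the reload metrics, it can also be called directly to force a single reconvergence outside of the auto-reload loop. A hedged sketch (agent as constructed earlier):

result = agent.converge_state_and_update
if result && result.success?
  # result.total is the number of pipeline actions that were applied
  puts "converged #{result.total} pipeline action(s)"
end
# result is nil when an exception was rescued and logged above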

#execute ⇒ Object



# File 'lib/logstash/agent.rb', line 83

def execute
  @thread = Thread.current # this var is implicitly used by Stud.stop?
  logger.debug("Starting agent")

  transition_to_running

  converge_state_and_update

  start_webserver

  if auto_reload?
    # `sleep_then_run` instead of firing the interval right away
    Stud.interval(@reload_interval, :sleep_then_run => true) do
      # TODO(ph) In reality we should get out of the loop, but I am worried
      # about the implications of that change, so instead, when we are
      # stopped, we simply don't converge.
      #
      # Logstash currently expects to block here; the shutdown signal will
      # force a kill on the agent, making the agent thread unblock.
      #
      # What we really need is one more state:
      #
      # init => running => stopping => stopped
      converge_state_and_update unless stopped?
    end
  else
    return 1 if clean_state?

    while !Stud.stop?
      if clean_state? || running_user_defined_pipelines?
        sleep(0.5)
      else
        break
      end
    end
  end

  return 0
ensure
  transition_to_stopped
end
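
execute blocks the calling thread, either inside the Stud.interval reload loop or in the Stud.stop? polling loop, so a caller typically runs it on a dedicated thread and later calls #shutdown. A sketch, assuming config.reload.automatic is false so the polling branch is taken:

agent_thread = Thread.new { agent.execute }

sleep(0.1) until agent.running?   # wait for transition_to_running

agent.shutdown      # stops metric collection, the webserver, and the pipelines
agent_thread.join   # the polling loop exits once no user-defined pipeline is running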

#get_pipeline(pipeline_id) ⇒ Object



# File 'lib/logstash/agent.rb', line 228

def get_pipeline(pipeline_id)
  pipelines.get(pipeline_id)
end

#id ⇒ Object



# File 'lib/logstash/agent.rb', line 190

def id
  return @id if @id

  uuid = nil
  if ::File.exists?(id_path)
    begin
      uuid = ::File.open(id_path) {|f| f.each_line.first.chomp }
    rescue => e
      logger.warn("Could not open persistent UUID file!",
                  :path => id_path,
                  :error => e.message,
                  :class => e.class.name)
    end
  end

  if !uuid
    uuid = SecureRandom.uuid
    logger.info("No persistent UUID file found. Generating new UUID",
                :uuid => uuid,
                :path => id_path)
    begin
      ::File.open(id_path, 'w') {|f| f.write(uuid) }
    rescue => e
      logger.warn("Could not write persistent UUID file! Will use ephemeral UUID",
                  :uuid => uuid,
                  :path => id_path,
                  :error => e.message,
                  :class => e.class.name)
    end
  end

  @id = uuid
end

#id_path ⇒ Object



# File 'lib/logstash/agent.rb', line 224

def id_path
  @id_path ||= ::File.join(settings.get("path.data"), "uuid")
end
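
#id lazily loads, or generates and persists, the node UUID, and #id_path points at the file it lives in under path.data. A small sketch of how the two interact (paths are illustrative):

agent.id_path   # => "#{settings.get('path.data')}/uuid", e.g. "/var/lib/logstash/data/uuid"
agent.id        # => first line of that file, or a newly generated SecureRandom.uuid
                #    that is written back when the file does not exist
agent.id.equal?(agent.id)   # => true; the value is memoized in @id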

#non_running_pipelines ⇒ Object



# File 'lib/logstash/agent.rb', line 240

def non_running_pipelines
  pipelines.select {|id,pipeline| !running_pipeline?(id) }
end

#pipelines_count ⇒ Object



# File 'lib/logstash/agent.rb', line 232

def pipelines_count
  pipelines.size
end

#running? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/logstash/agent.rb', line 129

def running?
  @running.value
end

#running_pipelines ⇒ Object



# File 'lib/logstash/agent.rb', line 236

def running_pipelines
  pipelines.select {|id,pipeline| running_pipeline?(id) }
end

#running_pipelines? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/logstash/agent.rb', line 244

def running_pipelines?
  running_pipelines_count > 0
end

#running_pipelines_count ⇒ Object



# File 'lib/logstash/agent.rb', line 248

def running_pipelines_count
  running_pipelines.size
end
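
The pipeline accessors above all read from the same pipelines map (a Java ConcurrentHashMap keyed by pipeline id), so they can be combined to inspect the agent's state. A sketch (the :main id is illustrative):

agent.pipelines_count              # every pipeline known to the agent
agent.running_pipelines_count      # only the ones currently running
agent.running_pipelines.keys       # ids of the running pipelines
agent.non_running_pipelines.keys   # ids of the loaded but not running pipelines

pipeline = agent.get_pipeline(:main)   # nil when no pipeline is registered under that id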

#running_user_defined_pipelines ⇒ Object



# File 'lib/logstash/agent.rb', line 256

def running_user_defined_pipelines
  pipelines.select {|id, pipeline| running_pipeline?(id) && !pipeline.system? }
end

#running_user_defined_pipelines? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/logstash/agent.rb', line 252

def running_user_defined_pipelines?
  !running_user_defined_pipelines.empty?
end

#shutdown ⇒ Object



# File 'lib/logstash/agent.rb', line 178

def shutdown
  # Since we're shutting down, we need to shut down the DAG of pipelines that are talking to each
  # other, in order of dependency.
  pipeline_bus.setBlockOnUnlisten(true)

  stop_collecting_metrics
  stop_webserver
  transition_to_stopped
  converge_result = shutdown_pipelines
  converge_result
end

#stopped? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/logstash/agent.rb', line 133

def stopped?
  !@running.value
end

#uptime ⇒ Fixnum

Calculate the Logstash uptime in milliseconds

Returns:

  • (Fixnum)

    Uptime in milliseconds



# File 'lib/logstash/agent.rb', line 174

def uptime
  ((Time.now.to_f - STARTED_AT.to_f) * 1000.0).to_i
end
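
Uptime is measured from the frozen STARTED_AT constant and returned as an integer number of milliseconds, so converting to seconds is a simple division:

ms = agent.uptime   # e.g. 125_000 after a little over two minutes
ms / 1000.0         # => 125.0 seconds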

#with_running_user_defined_pipelines {|running_user_defined_pipelines| ... } ⇒ Object



# File 'lib/logstash/agent.rb', line 260

def with_running_user_defined_pipelines
  yield running_user_defined_pipelines
end
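
with_running_user_defined_pipelines simply yields the map returned by #running_user_defined_pipelines, which is convenient for block-style iteration. A sketch:

agent.with_running_user_defined_pipelines do |pipelines|
  pipelines.each do |pipeline_id, pipeline|
    puts "#{pipeline_id} is running"
  end
end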