Class: Kafka::Cluster

Inherits:
Object
Defined in:
lib/kafka/cluster.rb

Overview

A cluster represents the state of a Kafka cluster. It needs to be initialized with a non-empty list of seed brokers. The first seed broker that the cluster can connect to will be asked for the cluster metadata, which allows the cluster to map topic partitions to the current leader for those partitions.

Instance Method Summary

Constructor Details

#initialize(seed_brokers:, broker_pool:, logger:, resolve_seed_brokers: false) ⇒ Cluster

Initializes a Cluster with a set of seed brokers.

The cluster will try to fetch cluster metadata from one of the brokers.

Parameters:

  • seed_brokers (Array<URI>)
  • broker_pool (Kafka::BrokerPool)
  • logger (Logger)
  • resolve_seed_brokers (Boolean) (defaults to: false)

# File 'lib/kafka/cluster.rb', line 23

def initialize(seed_brokers:, broker_pool:, logger:, resolve_seed_brokers: false)
  if seed_brokers.empty?
    raise ArgumentError, "At least one seed broker must be configured"
  end

  @logger = TaggedLogger.new(logger)
  @seed_brokers = seed_brokers
  @broker_pool = broker_pool
  @resolve_seed_brokers = resolve_seed_brokers
  @cluster_info = nil
  @stale = true

  # This is the set of topics we need metadata for. If empty, metadata for
  # all topics will be fetched.
  @target_topics = Set.new
end
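
Example (a minimal sketch, not taken from the library's docs: `broker_pool` is assumed to be an already-built Kafka::BrokerPool, which Kafka::Client normally constructs and wires up for you):

  require "kafka"
  require "logger"
  require "uri"

  # Seed brokers bootstrap the metadata fetch; any one reachable broker is enough.
  cluster = Kafka::Cluster.new(
    seed_brokers: [URI("kafka://kafka1:9092"), URI("kafka://kafka2:9092")],
    broker_pool: broker_pool,   # assumed: a configured Kafka::BrokerPool
    logger: Logger.new($stdout),
    resolve_seed_brokers: true
  )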

Instance Method Details

#add_target_topics(topics) ⇒ nil

Adds a list of topics to the target list. Only the topics on this list will be queried for metadata.

Parameters:

  • topics (Array<String>)

Returns:

  • (nil)


# File 'lib/kafka/cluster.rb', line 45

def add_target_topics(topics)
  topics = Set.new(topics)
  unless topics.subset?(@target_topics)
    new_topics = topics - @target_topics

    unless new_topics.empty?
      if new_topics.any? { |topic| topic.nil? or topic.empty? }
        raise ArgumentError, "Topic must not be nil or empty"
      end

      @logger.info "New topics added to target list: #{new_topics.to_a.join(', ')}"

      @target_topics.merge(new_topics)

      refresh_metadata!
    end
  end
end
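
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster; the topic names are illustrative):

  # Only targeted topics are included in metadata refreshes.
  cluster.add_target_topics(["greetings", "events"])

  # Topics already on the target list are ignored, so this second call
  # does not trigger another metadata refresh.
  cluster.add_target_topics(["greetings"])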

#alter_configs(broker_id, configs = []) ⇒ Object



# File 'lib/kafka/cluster.rb', line 166

def alter_configs(broker_id, configs = [])
  options = {
    resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
  }

  info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
  broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)

  response = broker.alter_configs(**options)

  response.resources.each do |resource|
    Protocol.handle_error(resource.error_code, resource.error_message)
  end

  nil
end

#alter_topic(name, configs = {}) ⇒ Object



# File 'lib/kafka/cluster.rb', line 271

def alter_topic(name, configs = {})
  options = {
    resources: [[Kafka::Protocol::RESOURCE_TYPE_TOPIC, name, configs]]
  }

  broker = controller_broker

  @logger.info "Altering the config for topic `#{name}` using controller broker #{broker}"

  response = broker.alter_configs(**options)

  response.resources.each do |resource|
    Protocol.handle_error(resource.error_code, resource.error_message)
  end

  nil
end
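
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster; the keys shown are standard Kafka topic configs):

  # Config names and values are passed through to the controller broker as-is.
  cluster.alter_topic("greetings",
    "retention.ms"   => "3600000",
    "cleanup.policy" => "delete"
  )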

#api_info(api_key) ⇒ Object



# File 'lib/kafka/cluster.rb', line 64

def api_info(api_key)
  apis.find {|api| api.api_key == api_key }
end

#apis ⇒ Object



# File 'lib/kafka/cluster.rb', line 79

def apis
  @apis ||=
    begin
      response = random_broker.api_versions

      Protocol.handle_error(response.error_code)

      response.apis
    end
end

#clear_target_topics ⇒ nil

Clears the list of target topics.

Returns:

  • (nil)

See Also:

  • #add_target_topics

# File 'lib/kafka/cluster.rb', line 94

def clear_target_topics
  @target_topics.clear
  refresh_metadata!
end

#cluster_info ⇒ Object



# File 'lib/kafka/cluster.rb', line 404

def cluster_info
  @cluster_info ||= fetch_cluster_info
end

#create_partitions_for(name, num_partitions:, timeout:) ⇒ Object



# File 'lib/kafka/cluster.rb', line 310

def create_partitions_for(name, num_partitions:, timeout:)
  options = {
    topics: [[name, num_partitions, nil]],
    timeout: timeout
  }

  broker = controller_broker

  @logger.info "Creating #{num_partitions} partition(s) for topic `#{name}` using controller broker #{broker}"

  response = broker.create_partitions(**options)

  response.errors.each do |topic, error_code, error_message|
    Protocol.handle_error(error_code, error_message)
  end
  mark_as_stale!

  @logger.info "Topic `#{name}` was updated"
end
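
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster; the timeout value mirrors the 30-second default used by Kafka::Client):

  # Grow the topic to 6 partitions. Partition counts can only be increased.
  cluster.create_partitions_for("greetings", num_partitions: 6, timeout: 30)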

#create_topic(name, num_partitions:, replication_factor:, timeout:, config:) ⇒ Object



# File 'lib/kafka/cluster.rb', line 192

def create_topic(name, num_partitions:, replication_factor:, timeout:, config:)
  options = {
    topics: {
      name => {
        num_partitions: num_partitions,
        replication_factor: replication_factor,
        config: config,
      }
    },
    timeout: timeout,
  }

  broker = controller_broker

  @logger.info "Creating topic `#{name}` using controller broker #{broker}"

  response = broker.create_topics(**options)

  response.errors.each do |topic, error_code|
    Protocol.handle_error(error_code)
  end

  begin
    partitions_for(name).each do |info|
      Protocol.handle_error(info.partition_error_code)
    end
  rescue Kafka::LeaderNotAvailable
    @logger.warn "Leader not yet available for `#{name}`, waiting 1s..."
    sleep 1

    retry
  rescue Kafka::UnknownTopicOrPartition
    @logger.warn "Topic `#{name}` not yet created, waiting 1s..."
    sleep 1

    retry
  end

  @logger.info "Topic `#{name}` was created"
end
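
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster; note that all keyword arguments are required by this method, unlike the defaults Kafka::Client provides):

  cluster.create_topic(
    "greetings",
    num_partitions: 3,
    replication_factor: 2,
    timeout: 30,
    config: { "retention.ms" => "3600000" }
  )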

#delete_topic(name, timeout:) ⇒ Object



# File 'lib/kafka/cluster.rb', line 233

def delete_topic(name, timeout:)
  options = {
    topics: [name],
    timeout: timeout,
  }

  broker = controller_broker

  @logger.info "Deleting topic `#{name}` using controller broker #{broker}"

  response = broker.delete_topics(**options)

  response.errors.each do |topic, error_code|
    Protocol.handle_error(error_code)
  end

  @logger.info "Topic `#{name}` was deleted"
end
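
Usage sketch (same assumptions as above):

  # The timeout bounds how long the controller waits before answering.
  cluster.delete_topic("greetings", timeout: 30)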

#describe_configs(broker_id, configs = []) ⇒ Object



# File 'lib/kafka/cluster.rb', line 149

def describe_configs(broker_id, configs = [])
  options = {
    resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
  }

  info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
  broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)

  response = broker.describe_configs(**options)

  response.resources.each do |resource|
    Protocol.handle_error(resource.error_code, resource.error_message)
  end

  response.resources.first.configs
end

#describe_group(group_id) ⇒ Object



# File 'lib/kafka/cluster.rb', line 289

def describe_group(group_id)
  response = get_group_coordinator(group_id: group_id).describe_groups(group_ids: [group_id])
  group = response.groups.first
  Protocol.handle_error(group.error_code)
  group
end
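
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster and that a consumer group named "greetings-group" exists; the group and member accessors shown are assumptions about the DescribeGroups response objects):

  group = cluster.describe_group("greetings-group")
  puts group.state                       # e.g. "Stable"
  group.members.each do |member|
    puts "#{member.client_id} (#{member.client_host})"
  end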

#describe_topic(name, configs = []) ⇒ Object



# File 'lib/kafka/cluster.rb', line 252

def describe_topic(name, configs = [])
  options = {
    resources: [[Kafka::Protocol::RESOURCE_TYPE_TOPIC, name, configs]]
  }
  broker = controller_broker

  @logger.info "Fetching topic `#{name}`'s configs using controller broker #{broker}"

  response = broker.describe_configs(**options)

  response.resources.each do |resource|
    Protocol.handle_error(resource.error_code, resource.error_message)
  end
  topic_description = response.resources.first
  topic_description.configs.each_with_object({}) do |config, hash|
    hash[config.name] = config.value
  end
end
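
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster; the second argument lists the config names to fetch, and the returned values are illustrative):

  cluster.describe_topic("greetings", ["retention.ms", "cleanup.policy"])
  # => { "retention.ms" => "3600000", "cleanup.policy" => "delete" }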

#disconnect ⇒ Object



# File 'lib/kafka/cluster.rb', line 400

def disconnect
  @broker_pool.close
end

#fetch_group_offsets(group_id) ⇒ Object



# File 'lib/kafka/cluster.rb', line 296

def fetch_group_offsets(group_id)
  topics = get_group_coordinator(group_id: group_id)
    .fetch_offsets(group_id: group_id, topics: nil)
    .topics

  topics.each do |_, partitions|
    partitions.each do |_, response|
      Protocol.handle_error(response.error_code)
    end
  end

  topics
end
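
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster; treating each partition entry as responding to `offset` is an assumption about the offset-fetch response objects):

  offsets = cluster.fetch_group_offsets("greetings-group")

  offsets.each do |topic, partitions|
    partitions.each do |partition, info|
      puts "#{topic}/#{partition}: #{info.offset}"
    end
  end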

#get_group_coordinator(group_id:) ⇒ Broker

Finds the broker acting as the coordinator of the given group.

Parameters:

  • group_id (String)

Returns:

  • (Broker)

    the broker that's currently coordinator.



# File 'lib/kafka/cluster.rb', line 125

def get_group_coordinator(group_id:)
  @logger.debug "Getting group coordinator for `#{group_id}`"
  refresh_metadata_if_necessary!
  get_coordinator(Kafka::Protocol::COORDINATOR_TYPE_GROUP, group_id)
end

#get_leader(topic, partition) ⇒ Broker

Finds the broker acting as the leader of the given topic and partition.

Parameters:

  • topic (String)
  • partition (Integer)

Returns:

  • (Broker)

    the broker that's currently leader.



# File 'lib/kafka/cluster.rb', line 117

def get_leader(topic, partition)
  connect_to_broker(get_leader_id(topic, partition))
end
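
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster; the returned Broker exposes the low-level protocol calls):

  # Route a request to whichever broker currently leads partition 0.
  leader = cluster.get_leader("greetings", 0)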

#get_transaction_coordinator(transactional_id:) ⇒ Broker

Finds the broker acting as the coordinator of the given transaction.

Parameters:

  • transactional_id (String)

Returns:

  • (Broker)

    the broker that's currently coordinator.



# File 'lib/kafka/cluster.rb', line 135

def get_transaction_coordinator(transactional_id:)
  @logger.debug "Getting transaction coordinator for `#{transactional_id}`"

  refresh_metadata_if_necessary!

  if transactional_id.nil?
    # Get a random_broker
    @logger.debug "Transaction ID is not available. Choose a random broker."
    return random_broker
  else
    get_coordinator(Kafka::Protocol::COORDINATOR_TYPE_TRANSACTION, transactional_id)
  end
end

#list_groups ⇒ Object



# File 'lib/kafka/cluster.rb', line 391

def list_groups
  refresh_metadata_if_necessary!
  cluster_info.brokers.map do |broker|
    response = connect_to_broker(broker.node_id).list_groups
    Protocol.handle_error(response.error_code)
    response.groups.map(&:group_id)
  end.flatten.uniq
end

#list_topics ⇒ Object

Lists all topics in the cluster.



# File 'lib/kafka/cluster.rb', line 384

def list_topics
  response = random_broker.fetch_metadata(topics: nil)
  response.topics.select do |topic|
    topic.topic_error_code == 0
  end.map(&:topic_name)
end

#mark_as_stale! ⇒ Object



# File 'lib/kafka/cluster.rb', line 99

def mark_as_stale!
  @stale = true
end

#partitions_for(topic) ⇒ Object



# File 'lib/kafka/cluster.rb', line 183

def partitions_for(topic)
  add_target_topics([topic])
  refresh_metadata_if_necessary!
  cluster_info.partitions_for(topic)
rescue Kafka::ProtocolError
  mark_as_stale!
  raise
end

#refresh_metadata! ⇒ Object



# File 'lib/kafka/cluster.rb', line 103

def refresh_metadata!
  @cluster_info = nil
  cluster_info
end

#refresh_metadata_if_necessary! ⇒ Object



108
109
110
# File 'lib/kafka/cluster.rb', line 108

def refresh_metadata_if_necessary!
  refresh_metadata! if @stale
end

#resolve_offset(topic, partition, offset) ⇒ Object



# File 'lib/kafka/cluster.rb', line 372

def resolve_offset(topic, partition, offset)
  resolve_offsets(topic, [partition], offset).fetch(partition)
end

#resolve_offsets(topic, partitions, offset) ⇒ Object



# File 'lib/kafka/cluster.rb', line 330

def resolve_offsets(topic, partitions, offset)
  add_target_topics([topic])
  refresh_metadata_if_necessary!

  partitions_by_broker = partitions.each_with_object({}) {|partition, hsh|
    broker = get_leader(topic, partition)

    hsh[broker] ||= []
    hsh[broker] << partition
  }

  if offset == :earliest
    offset = -2
  elsif offset == :latest
    offset = -1
  end

  offsets = {}

  partitions_by_broker.each do |broker, broker_partitions|
    response = broker.list_offsets(
      topics: {
        topic => broker_partitions.map {|partition|
          {
            partition: partition,
            time: offset
          }
        }
      }
    )

    broker_partitions.each do |partition|
      offsets[partition] = response.offset_for(topic, partition)
    end
  end

  offsets
rescue Kafka::ProtocolError
  mark_as_stale!
  raise
end
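
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster; `:earliest` and `:latest` map to the protocol's -2 and -1 sentinel values as shown in the source above, and the returned offsets are illustrative):

  # Latest offsets for two partitions, keyed by partition number.
  cluster.resolve_offsets("greetings", [0, 1], :latest)
  # => { 0 => 42, 1 => 17 }

  # Single-partition convenience wrapper (see #resolve_offset above).
  cluster.resolve_offset("greetings", 0, :earliest)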

#supports_api?(api_key, version = nil) ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/kafka/cluster.rb', line 68

def supports_api?(api_key, version = nil)
  info = api_info(api_key)
  if info.nil?
    return false
  elsif version.nil?
    return true
  else
    return info.version_supported?(version)
  end
end
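
Usage sketch (assumes `cluster` is an initialized Kafka::Cluster; Kafka::Protocol::PRODUCE_API is the library's constant for API key 0, the Produce API):

  # Does the cluster support the Produce API at all, and at version 3?
  cluster.supports_api?(Kafka::Protocol::PRODUCE_API)
  cluster.supports_api?(Kafka::Protocol::PRODUCE_API, 3)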

#topics ⇒ Object



# File 'lib/kafka/cluster.rb', line 376

def topics
  refresh_metadata_if_necessary!
  cluster_info.topics.select do |topic|
    topic.topic_error_code == 0
  end.map(&:topic_name)
end