Class: Kafka::Cluster
Inherits: Object
Defined in: lib/kafka/cluster.rb
Overview
A cluster represents the state of a Kafka cluster. It needs to be initialized with a non-empty list of seed brokers. The first seed broker that the cluster can connect to will be asked for the cluster metadata, which allows the cluster to map topic partitions to the current leader for those partitions.
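For orientation, a minimal construction sketch. In normal use Kafka::Client builds and owns the Cluster; the broker_pool below is assumed to be an already-constructed Kafka::BrokerPool, and the seed broker URI is illustrative.

require "logger"

# Hypothetical setup: `broker_pool` is assumed to be a pre-built Kafka::BrokerPool.
cluster = Kafka::Cluster.new(
  seed_brokers: [URI("kafka://localhost:9092")],  # illustrative seed broker
  broker_pool: broker_pool,
  logger: Logger.new($stdout)
)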
Instance Method Summary
- #add_target_topics(topics) ⇒ nil
  Adds a list of topics to the target list.
- #alter_configs(broker_id, configs = []) ⇒ Object
- #alter_topic(name, configs = {}) ⇒ Object
- #api_info(api_key) ⇒ Object
- #apis ⇒ Object
- #clear_target_topics ⇒ nil
  Clears the list of target topics.
- #cluster_info ⇒ Object
- #create_partitions_for(name, num_partitions:, timeout:) ⇒ Object
- #create_topic(name, num_partitions:, replication_factor:, timeout:, config:) ⇒ Object
- #delete_topic(name, timeout:) ⇒ Object
- #describe_configs(broker_id, configs = []) ⇒ Object
- #describe_group(group_id) ⇒ Object
- #describe_topic(name, configs = []) ⇒ Object
- #disconnect ⇒ Object
- #fetch_group_offsets(group_id) ⇒ Object
- #get_group_coordinator(group_id:) ⇒ Broker
  Finds the broker acting as the coordinator of the given group.
- #get_leader(topic, partition) ⇒ Broker
  Finds the broker acting as the leader of the given topic and partition.
- #get_transaction_coordinator(transactional_id:) ⇒ Broker
  Finds the broker acting as the coordinator of the given transaction.
- #initialize(seed_brokers:, broker_pool:, logger:) ⇒ Cluster (constructor)
  Initializes a Cluster with a set of seed brokers.
- #list_groups ⇒ Object
- #list_topics ⇒ Object
  Lists all topics in the cluster.
- #mark_as_stale! ⇒ Object
- #partitions_for(topic) ⇒ Object
- #refresh_metadata! ⇒ Object
- #refresh_metadata_if_necessary! ⇒ Object
- #resolve_offset(topic, partition, offset) ⇒ Object
- #resolve_offsets(topic, partitions, offset) ⇒ Object
- #supports_api?(api_key, version = nil) ⇒ Boolean
- #topics ⇒ Object
Constructor Details
#initialize(seed_brokers:, broker_pool:, logger:) ⇒ Cluster
Initializes a Cluster with a set of seed brokers.
The cluster will try to fetch cluster metadata from one of the brokers.
# File 'lib/kafka/cluster.rb', line 21

def initialize(seed_brokers:, broker_pool:, logger:)
  if seed_brokers.empty?
    raise ArgumentError, "At least one seed broker must be configured"
  end

  @logger = TaggedLogger.new(logger)
  @seed_brokers = seed_brokers
  @broker_pool = broker_pool
  @cluster_info = nil
  @stale = true

  # This is the set of topics we need metadata for. If empty, metadata for
  # all topics will be fetched.
  @target_topics = Set.new
end
Instance Method Details
#add_target_topics(topics) ⇒ nil
Adds a list of topics to the target list. Only the topics on this list will be queried for metadata.
# File 'lib/kafka/cluster.rb', line 42

def add_target_topics(topics)
  topics = Set.new(topics)
  unless topics.subset?(@target_topics)
    new_topics = topics - @target_topics

    unless new_topics.empty?
      if new_topics.any? { |topic| topic.nil? or topic.empty? }
        raise ArgumentError, "Topic must not be nil or empty"
      end

      @logger.info "New topics added to target list: #{new_topics.to_a.join(', ')}"

      @target_topics.merge(new_topics)

      refresh_metadata!
    end
  end
end
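A usage sketch with illustrative topic names. Because the method only acts on topics not already in the target list, repeated calls with the same topics are cheap no-ops:

cluster.add_target_topics(["greetings", "events"])  # new topics: triggers a metadata refresh
cluster.add_target_topics(["greetings"])            # subset of the target list: does nothing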
#alter_configs(broker_id, configs = []) ⇒ Object
# File 'lib/kafka/cluster.rb', line 163

def alter_configs(broker_id, configs = [])
  options = {
    resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
  }

  info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
  broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)

  response = broker.alter_configs(**options)

  response.resources.each do |resource|
    Protocol.handle_error(resource.error_code, resource.error_message)
  end

  nil
end
#alter_topic(name, configs = {}) ⇒ Object
# File 'lib/kafka/cluster.rb', line 268

def alter_topic(name, configs = {})
  options = {
    resources: [[Kafka::Protocol::RESOURCE_TYPE_TOPIC, name, configs]]
  }

  broker = controller_broker

  @logger.info "Altering the config for topic `#{name}` using controller broker #{broker}"

  response = broker.alter_configs(**options)

  response.resources.each do |resource|
    Protocol.handle_error(resource.error_code, resource.error_message)
  end

  nil
end
#api_info(api_key) ⇒ Object
# File 'lib/kafka/cluster.rb', line 61

def api_info(api_key)
  apis.find {|api| api.api_key == api_key }
end
#apis ⇒ Object
# File 'lib/kafka/cluster.rb', line 76

def apis
  @apis ||=
    begin
      response = random_broker.api_versions

      Protocol.handle_error(response.error_code)

      response.apis
    end
end
#clear_target_topics ⇒ nil
Clears the list of target topics.
# File 'lib/kafka/cluster.rb', line 91

def clear_target_topics
  @target_topics.clear
  refresh_metadata!
end
#cluster_info ⇒ Object
# File 'lib/kafka/cluster.rb', line 401

def cluster_info
  @cluster_info ||= fetch_cluster_info
end
#create_partitions_for(name, num_partitions:, timeout:) ⇒ Object
# File 'lib/kafka/cluster.rb', line 307

def create_partitions_for(name, num_partitions:, timeout:)
  options = {
    topics: [[name, num_partitions, nil]],
    timeout: timeout
  }

  broker = controller_broker

  @logger.info "Creating #{num_partitions} partition(s) for topic `#{name}` using controller broker #{broker}"

  response = broker.create_partitions(**options)

  response.errors.each do |topic, error_code, error_message|
    Protocol.handle_error(error_code, error_message)
  end
  mark_as_stale!

  @logger.info "Topic `#{name}` was updated"
end
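An illustrative call, assuming a topic named `greetings` already exists: grow it to 6 partitions, giving the controller up to 30 seconds to apply the change.

cluster.create_partitions_for("greetings", num_partitions: 6, timeout: 30)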
#create_topic(name, num_partitions:, replication_factor:, timeout:, config:) ⇒ Object
# File 'lib/kafka/cluster.rb', line 189

def create_topic(name, num_partitions:, replication_factor:, timeout:, config:)
  options = {
    topics: {
      name => {
        num_partitions: num_partitions,
        replication_factor: replication_factor,
        config: config,
      }
    },
    timeout: timeout,
  }

  broker = controller_broker

  @logger.info "Creating topic `#{name}` using controller broker #{broker}"

  response = broker.create_topics(**options)

  response.errors.each do |topic, error_code|
    Protocol.handle_error(error_code)
  end

  begin
    partitions_for(name).each do |info|
      Protocol.handle_error(info.partition_error_code)
    end
  rescue Kafka::LeaderNotAvailable
    @logger.warn "Leader not yet available for `#{name}`, waiting 1s..."
    sleep 1

    retry
  rescue Kafka::UnknownTopicOrPartition
    @logger.warn "Topic `#{name}` not yet created, waiting 1s..."
    sleep 1

    retry
  end

  @logger.info "Topic `#{name}` was created"
end
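An illustrative call; the topic name and the `retention.ms` override are assumptions, and `config` takes raw topic-level config entries:

cluster.create_topic(
  "greetings",
  num_partitions: 3,
  replication_factor: 2,
  timeout: 30,
  config: { "retention.ms" => "86400000" }  # hypothetical config override
)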
#delete_topic(name, timeout:) ⇒ Object
# File 'lib/kafka/cluster.rb', line 230

def delete_topic(name, timeout:)
  options = {
    topics: [name],
    timeout: timeout,
  }

  broker = controller_broker

  @logger.info "Deleting topic `#{name}` using controller broker #{broker}"

  response = broker.delete_topics(**options)

  response.errors.each do |topic, error_code|
    Protocol.handle_error(error_code)
  end

  @logger.info "Topic `#{name}` was deleted"
end
#describe_configs(broker_id, configs = []) ⇒ Object
# File 'lib/kafka/cluster.rb', line 146

def describe_configs(broker_id, configs = [])
  options = {
    resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
  }

  info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
  broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)

  response = broker.describe_configs(**options)

  response.resources.each do |resource|
    Protocol.handle_error(resource.error_code, resource.error_message)
  end

  response.resources.first.configs
end
#describe_group(group_id) ⇒ Object
# File 'lib/kafka/cluster.rb', line 286

def describe_group(group_id)
  response = get_group_coordinator(group_id: group_id).describe_groups(group_ids: [group_id])
  group = response.groups.first
  Protocol.handle_error(group.error_code)
  group
end
#describe_topic(name, configs = []) ⇒ Object
# File 'lib/kafka/cluster.rb', line 249

def describe_topic(name, configs = [])
  options = {
    resources: [[Kafka::Protocol::RESOURCE_TYPE_TOPIC, name, configs]]
  }
  broker = controller_broker

  @logger.info "Fetching topic `#{name}`'s configs using controller broker #{broker}"

  response = broker.describe_configs(**options)

  response.resources.each do |resource|
    Protocol.handle_error(resource.error_code, resource.error_message)
  end

  topic_description = response.resources.first
  topic_description.configs.each_with_object({}) do |config, hash|
    hash[config.name] = config.value
  end
end
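An illustrative call; the config names and returned values below are examples, not guaranteed output:

cluster.describe_topic("greetings", ["retention.ms", "cleanup.policy"])
# => { "retention.ms" => "86400000", "cleanup.policy" => "delete" }  (example values)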
#disconnect ⇒ Object
# File 'lib/kafka/cluster.rb', line 397

def disconnect
  @broker_pool.close
end
#fetch_group_offsets(group_id) ⇒ Object
# File 'lib/kafka/cluster.rb', line 293

def fetch_group_offsets(group_id)
  topics = get_group_coordinator(group_id: group_id)
    .fetch_offsets(group_id: group_id, topics: nil)
    .topics

  topics.each do |_, partitions|
    partitions.each do |_, response|
      Protocol.handle_error(response.error_code)
    end
  end

  topics
end
#get_group_coordinator(group_id:) ⇒ Broker
Finds the broker acting as the coordinator of the given group.
# File 'lib/kafka/cluster.rb', line 122

def get_group_coordinator(group_id:)
  @logger.debug "Getting group coordinator for `#{group_id}`"
  refresh_metadata_if_necessary!
  get_coordinator(Kafka::Protocol::COORDINATOR_TYPE_GROUP, group_id)
end
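An illustrative lookup for a hypothetical consumer group; the returned Broker can then be used for group requests such as #describe_group:

coordinator = cluster.get_group_coordinator(group_id: "my-consumer-group")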
#get_leader(topic, partition) ⇒ Broker
Finds the broker acting as the leader of the given topic and partition.
# File 'lib/kafka/cluster.rb', line 114

def get_leader(topic, partition)
  connect_to_broker(get_leader_id(topic, partition))
end
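An illustrative lookup, assuming a topic `greetings` with a partition 0; the returned Broker is the current leader for that partition:

leader = cluster.get_leader("greetings", 0)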
#get_transaction_coordinator(transactional_id:) ⇒ Broker
Finds the broker acting as the coordinator of the given transaction.
# File 'lib/kafka/cluster.rb', line 132

def get_transaction_coordinator(transactional_id:)
  @logger.debug "Getting transaction coordinator for `#{transactional_id}`"

  refresh_metadata_if_necessary!

  if transactional_id.nil?
    # Get a random_broker
    @logger.debug "Transaction ID is not available. Choose a random broker."
    return random_broker
  else
    get_coordinator(Kafka::Protocol::COORDINATOR_TYPE_TRANSACTION, transactional_id)
  end
end
#list_groups ⇒ Object
# File 'lib/kafka/cluster.rb', line 388

def list_groups
  refresh_metadata_if_necessary!
  cluster_info.brokers.map do |broker|
    response = connect_to_broker(broker.node_id).list_groups
    Protocol.handle_error(response.error_code)
    response.groups.map(&:group_id)
  end.flatten.uniq
end
#list_topics ⇒ Object
Lists all topics in the cluster.
# File 'lib/kafka/cluster.rb', line 381

def list_topics
  response = random_broker.fetch_metadata(topics: nil)
  response.topics.select do |topic|
    topic.topic_error_code == 0
  end.map(&:topic_name)
end
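A sketch of the return value; the topic names are examples:

cluster.list_topics
# => ["greetings", "events", "__consumer_offsets"]  (example values)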
#mark_as_stale! ⇒ Object
# File 'lib/kafka/cluster.rb', line 96

def mark_as_stale!
  @stale = true
end
#partitions_for(topic) ⇒ Object
# File 'lib/kafka/cluster.rb', line 180

def partitions_for(topic)
  add_target_topics([topic])
  refresh_metadata_if_necessary!
  cluster_info.partitions_for(topic)
rescue Kafka::ProtocolError
  mark_as_stale!
  raise
end
#refresh_metadata! ⇒ Object
# File 'lib/kafka/cluster.rb', line 100

def refresh_metadata!
  @cluster_info = nil
  cluster_info
end
#refresh_metadata_if_necessary! ⇒ Object
# File 'lib/kafka/cluster.rb', line 105

def refresh_metadata_if_necessary!
  refresh_metadata! if @stale
end
#resolve_offset(topic, partition, offset) ⇒ Object
# File 'lib/kafka/cluster.rb', line 369

def resolve_offset(topic, partition, offset)
  resolve_offsets(topic, [partition], offset).fetch(partition)
end
#resolve_offsets(topic, partitions, offset) ⇒ Object
# File 'lib/kafka/cluster.rb', line 327

def resolve_offsets(topic, partitions, offset)
  add_target_topics([topic])
  refresh_metadata_if_necessary!

  partitions_by_broker = partitions.each_with_object({}) {|partition, hsh|
    broker = get_leader(topic, partition)

    hsh[broker] ||= []
    hsh[broker] << partition
  }

  if offset == :earliest
    offset = -2
  elsif offset == :latest
    offset = -1
  end

  offsets = {}

  partitions_by_broker.each do |broker, broker_partitions|
    response = broker.list_offsets(
      topics: {
        topic => broker_partitions.map {|partition|
          {
            partition: partition,
            time: offset
          }
        }
      }
    )

    broker_partitions.each do |partition|
      offsets[partition] = response.offset_for(topic, partition)
    end
  end

  offsets
rescue Kafka::ProtocolError
  mark_as_stale!
  raise
end
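An illustrative call, assuming a topic `greetings` with three partitions. The symbols :earliest and :latest are translated to the protocol's special timestamps -2 and -1, as shown above:

cluster.resolve_offsets("greetings", [0, 1, 2], :latest)
# => { 0 => 42, 1 => 17, 2 => 23 }  (example offsets)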
#supports_api?(api_key, version = nil) ⇒ Boolean
# File 'lib/kafka/cluster.rb', line 65

def supports_api?(api_key, version = nil)
  info = api_info(api_key)
  if info.nil?
    return false
  elsif version.nil?
    return true
  else
    return info.version_supported?(version)
  end
end
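A sketch, assuming Kafka::Protocol::PRODUCE_API holds the numeric api_key for the Produce API (0):

cluster.supports_api?(Kafka::Protocol::PRODUCE_API)     # any version
cluster.supports_api?(Kafka::Protocol::PRODUCE_API, 3)  # a specific version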
#topics ⇒ Object
# File 'lib/kafka/cluster.rb', line 373

def topics
  refresh_metadata_if_necessary!
  cluster_info.topics.select do |topic|
    topic.topic_error_code == 0
  end.map(&:topic_name)
end