Class: CassandraObject::Adapters::CassandraSchemalessAdapter
- Inherits: AbstractAdapter
  - Object
  - AbstractAdapter
  - CassandraObject::Adapters::CassandraSchemalessAdapter
- Includes: CassandraObject::AdapterExtension
- Defined in:
  lib/initializers/reconnection.rb,
  lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb
Defined Under Namespace
Classes: QueryBuilder
Instance Attribute Summary
Attributes inherited from AbstractAdapter
#config
Instance Method Summary
- #cassandra_cluster_options ⇒ Object
- #cassandra_version ⇒ Object
- #connection ⇒ Object
- #consistency ⇒ Object
- #consistency=(val) ⇒ Object
- #create_ids_where_clause(ids) ⇒ Object
- #create_table(table_name, params = {}) ⇒ Object
- #delete(table, ids) ⇒ Object
- #drop_table(table_name, confirm = false) ⇒ Object
- #execute(statement, arguments = []) ⇒ Object
- #execute_async(queries, arguments = [], per_page = nil, next_cursor = nil) ⇒ Object
- #execute_batch(statements) ⇒ Object
- #insert(table, id, attributes, ttl = nil) ⇒ Object
- #pre_select(scope, per_page = nil, next_cursor = nil) ⇒ Object
- #primary_key_column ⇒ Object
- #schema_execute(cql, keyspace) ⇒ Object
- #select(scope) ⇒ Object
- #select_paginated(scope) ⇒ Object
- #statement_create_with_options(stmt, options) ⇒ Object
- #update(table, id, attributes, ttl = nil) ⇒ Object
- #write(table, id, attributes, ttl) ⇒ Object
Methods inherited from AbstractAdapter
#batch, #batching?, #execute_batchable, #initialize, #statement_with_options
Constructor Details
This class inherits a constructor from CassandraObject::Adapters::AbstractAdapter
Instance Method Details
#cassandra_cluster_options ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 63

def cassandra_cluster_options
  # NOTE: the local variable name was lost in extraction; `cluster_options` is a reconstruction.
  cluster_options = config.slice(*[
    :auth_provider,
    :client_cert,
    :compression,
    :compressor,
    :connect_timeout,
    :connections_per_local_node,
    :connections_per_remote_node,
    :consistency,
    :credentials,
    :futures_factory,
    :hosts,
    :load_balancing_policy,
    :logger,
    :page_size,
    :passphrase,
    :password,
    :port,
    :private_key,
    :protocol_version,
    :reconnection_policy,
    :retry_policy,
    :schema_refresh_delay,
    :schema_refresh_timeout,
    :server_cert,
    :ssl,
    :timeout,
    :trace,
    :username,
    :heartbeat_interval,
    :idle_timeout
  ])

  # Instantiate policy objects from the configured policy names (string or hash form).
  {
    load_balancing_policy: 'Cassandra::LoadBalancing::Policies::%s',
    reconnection_policy: 'Cassandra::Reconnection::Policies::%s',
    retry_policy: 'Cassandra::Retry::Policies::%s'
  }.each do |policy_key, class_template|
    params = cluster_options[policy_key]
    if params
      if params.is_a?(Hash)
        cluster_options[policy_key] = (class_template % [params[:policy].classify]).constantize.new(*params[:params] || [])
      else
        cluster_options[policy_key] = (class_template % [params.classify]).constantize.new
      end
    end
  end

  # Setting defaults
  cluster_options.merge!({
    heartbeat_interval: cluster_options.keys.include?(:heartbeat_interval) ? cluster_options[:heartbeat_interval] : 30,
    idle_timeout: cluster_options[:idle_timeout] || 60,
    max_schema_agreement_wait: 1,
    consistency: cluster_options[:consistency] || :one,
    protocol_version: cluster_options[:protocol_version] || 3,
    page_size: cluster_options[:page_size] || 10000
  })
end
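A minimal sketch of a config hash that exercises the policy-instantiation branch above; the host, keyspace and policy values are illustrative only. A plain string is classified and constantized, while the hash form also passes constructor params:

# Hypothetical configuration; hosts, keyspace and policy values are made up.
config = {
  hosts: ['127.0.0.1'],
  keyspace: 'my_keyspace',
  # String form: becomes Cassandra::Retry::Policies::Default.new
  retry_policy: 'default',
  # Hash form: becomes Cassandra::Reconnection::Policies::Constant.new(5)
  reconnection_policy: { policy: 'constant', params: [5] },
  consistency: :quorum
}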
#cassandra_version ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 268

def cassandra_version
  @cassandra_version ||= execute('select release_version from system.local').rows.first['release_version'].to_f
end
#connection ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 124

def connection
  @connection ||= begin
    cluster = Cassandra.cluster cassandra_cluster_options
    cluster.connect config[:keyspace]
  end
end
#consistency ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 274

def consistency
  defined?(@consistency) ? @consistency : nil
end
#consistency=(val) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 278

def consistency=(val)
  @consistency = val
end
#create_ids_where_clause(ids) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 323

def create_ids_where_clause(ids)
  return ids if ids.empty?
  ids = ids.first if ids.is_a?(Array) && ids.one?
  sql = ids.is_a?(Array) ? "#{primary_key_column} IN (#{ids.map { |id| "'#{id}'" }.join(',')})" : "#{primary_key_column} = ?"
  return sql
end
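As a rough illustration (the ids are made up): a single id yields a bind-ready equality clause, while multiple ids are inlined into an IN list:

# Illustrative only; 'a1' and 'b2' are hypothetical ids.
create_ids_where_clause(['a1'])        # => "key = ?"   (the id is passed separately as a bound argument)
create_ids_where_clause(['a1', 'b2'])  # => "key IN ('a1','b2')"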
#create_table(table_name, params = {}) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 242

def create_table(table_name, params = {})
  stmt = "CREATE TABLE #{table_name} (" +
         'key text,' +
         'column1 text,' +
         'value text,' +
         'PRIMARY KEY (key, column1)' +
         ')'
  # WITH COMPACT STORAGE
  schema_execute statement_create_with_options(stmt, params[:options]), config[:keyspace]
end
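A brief usage sketch; the table name is hypothetical and `adapter` is assumed to be an already-configured adapter instance. With no :options, the version-specific WITH-clause defaults from #statement_create_with_options are applied; a non-nil :options value is handed to the inherited #statement_with_options helper instead.

# Hypothetical table name.
adapter.create_table('events_by_user')
# Dropping it later requires confirm = true unless the table is empty:
adapter.drop_table('events_by_user', true)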
#delete(table, ids) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 223

def delete(table, ids)
  ids = [ids] if !ids.is_a?(Array)
  arguments = nil
  arguments = ids if ids.size == 1
  statement = "DELETE FROM #{table} WHERE #{create_ids_where_clause(ids)}" #.gsub('?', ids.map { |id| "'#{id}'" }.join(','))
  execute(statement, arguments)
end
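Illustrative calls (table and ids are made up): a single id is bound as an argument, whereas multiple ids are inlined by #create_ids_where_clause and no arguments are bound:

adapter.delete('events_by_user', 'a1')          # DELETE FROM events_by_user WHERE key = ?             (arguments: ['a1'])
adapter.delete('events_by_user', ['a1', 'b2'])  # DELETE FROM events_by_user WHERE key IN ('a1','b2')  (arguments: nil)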
#drop_table(table_name, confirm = false) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 253

def drop_table(table_name, confirm = false)
  count = (schema_execute "SELECT count(*) FROM #{table_name}", config[:keyspace]).rows.first['count']
  if confirm || count == 0
    schema_execute "DROP TABLE #{table_name}", config[:keyspace]
  else
    raise "The table #{table_name} is not empty! If you want to drop it add the option confirm = true"
  end
end
#execute(statement, arguments = []) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 131

def execute(statement, arguments = [])
  ActiveSupport::Notifications.instrument('cql.cassandra_object', cql: statement) do
    connection.execute statement, arguments: arguments, consistency: consistency, page_size: config[:page_size]
  end
end
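Since every statement is instrumented under 'cql.cassandra_object', a small sketch of how a caller might log the issued CQL; using Rails.logger as the destination is an assumption:

# Sketch: subscribe to the adapter's instrumentation event and log each statement with its duration.
# Assumes a Rails logger is available; any logger would do.
ActiveSupport::Notifications.subscribe('cql.cassandra_object') do |_name, start, finish, _id, payload|
  Rails.logger.debug("CQL (#{((finish - start) * 1000).round(1)}ms) #{payload[:cql]}")
end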
#execute_async(queries, arguments = [], per_page = nil, next_cursor = nil) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 137

def execute_async(queries, arguments = [], per_page = nil, next_cursor = nil)
  per_page ||= config[:page_size]
  futures = queries.map { |q|
    ActiveSupport::Notifications.instrument('cql.cassandra_object', cql: q) do
      connection.execute_async q, arguments: arguments, consistency: consistency, page_size: per_page, paging_state: next_cursor
    end
  }
  futures.map do |future|
    rows = future.get
    rows
  end
end
#execute_batch(statements) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 231

def execute_batch(statements)
  raise 'No can do' if statements.empty?
  batch = connection.batch do |b|
    statements.each do |statement|
      b.add(statement[:query], arguments: statement[:arguments])
    end
  end
  connection.execute(batch, page_size: config[:page_size])
end
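Each element of statements is a hash with :query and :arguments keys, mirroring what #write builds; a hedged sketch with made-up table and values:

# Illustrative statements only.
adapter.execute_batch([
  { query: 'INSERT INTO events_by_user (key,column1,value) VALUES (?,?,?)', arguments: ['a1', 'name', 'login'] },
  { query: 'DELETE FROM events_by_user WHERE key = ? AND column1 = ?',      arguments: ['a1', 'obsolete'] }
])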
#insert(table, id, attributes, ttl = nil) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 198

def insert(table, id, attributes, ttl = nil)
  write(table, id, attributes, ttl)
end
#pre_select(scope, per_page = nil, next_cursor = nil) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 150

def pre_select(scope, per_page = nil, next_cursor = nil)
  query = "SELECT DISTINCT #{primary_key_column} FROM #{scope.klass.column_family}"
  query << " LIMIT #{scope.limit_value}" if scope.limit_value == 1

  ids = []
  new_next_cursor = nil
  execute_async([query], nil, per_page, next_cursor).each do |item|
    item.rows.each { |x| ids << x[primary_key_column] }
    new_next_cursor = item.paging_state unless item.last_page?
  end
  return {ids: ids, new_next_cursor: new_next_cursor}
end
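A hedged sketch of how a caller might page through all partition keys with this method; `scope` is assumed to be a relation-like object exposing klass.column_family and limit_value, as the method expects, and the page size of 100 is arbitrary:

cursor = nil
loop do
  page = adapter.pre_select(scope, 100, cursor)
  page[:ids].each { |id| puts id }
  cursor = page[:new_next_cursor]
  break if cursor.nil?   # nil once the last page has been read
end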
#primary_key_column ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 59

def primary_key_column
  'key'
end
#schema_execute(cql, keyspace) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 262

def schema_execute(cql, keyspace)
  schema_db = Cassandra.cluster cassandra_cluster_options
  connection = schema_db.connect keyspace
  connection.execute cql, consistency: consistency
end
#select(scope) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 162

def select(scope)
  queries = QueryBuilder.new(self, scope).to_query_async
  queries.compact! if queries.present?

  raise CassandraObject::RecordNotFound if !queries.present?

  arguments = scope.select_values.select{ |sv| sv != :column1 }.map(&:to_s)
  arguments += scope.where_values.select.each_with_index{ |_, i| i.odd? }.reject{ |c| c.empty? }.map(&:to_s)

  records = execute_async(queries, arguments).map do |item|
    # pagination
    elems = []
    loop do
      item.rows.each{ |x| elems << x }
      break if item.last_page?
      item = item.next_page
    end
    elems
  end
  {results: records.flatten!}
end
#select_paginated(scope) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 182

def select_paginated(scope)
  queries = QueryBuilder.new(self, scope).to_query_async
  queries.compact! if queries.present?

  raise CassandraObject::RecordNotFound if !queries.present?

  arguments = scope.select_values.select{ |sv| sv != :column1 }.map(&:to_s)
  arguments += scope.where_values.select.each_with_index{ |_, i| i.odd? }.reject{ |c| c.empty? }.map(&:to_s)

  new_next_cursor = nil
  records = []
  execute_async(queries, arguments, scope.limit_value, scope.next_cursor).each do |item|
    new_next_cursor = item.paging_state unless item.last_page?
    item.rows.each{ |x| records << x }
  end
  {results: records, new_next_cursor: new_next_cursor}
end
#statement_create_with_options(stmt, options) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 282

def statement_create_with_options(stmt, options)
  if !options.nil?
    statement_with_options stmt, options
  else
    # standard options
    if cassandra_version < 3
      "#{stmt} WITH COMPACT STORAGE
        AND bloom_filter_fp_chance = 0.001
        AND CLUSTERING ORDER BY (column1 ASC)
        AND caching = '{\"keys\":\"ALL\", \"rows_per_partition\":\"NONE\"}'
        AND comment = ''
        AND compaction = {'min_sstable_size': '52428800', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}
        AND compression = {'chunk_length_kb': '64', 'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
        AND dclocal_read_repair_chance = 0.0
        AND default_time_to_live = 0
        AND gc_grace_seconds = 864000
        AND max_index_interval = 2048
        AND memtable_flush_period_in_ms = 0
        AND min_index_interval = 128
        AND read_repair_chance = 1.0
        AND speculative_retry = 'NONE';"
    else
      "#{stmt} WITH read_repair_chance = 0.0
        AND dclocal_read_repair_chance = 0.1
        AND gc_grace_seconds = 864000
        AND bloom_filter_fp_chance = 0.01
        AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' }
        AND comment = ''
        AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 }
        AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' }
        AND default_time_to_live = 0
        AND speculative_retry = '99PERCENTILE'
        AND min_index_interval = 128
        AND max_index_interval = 2048
        AND crc_check_chance = 1.0;"
    end
  end
end
#update(table, id, attributes, ttl = nil) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 202

def update(table, id, attributes, ttl = nil)
  write(table, id, attributes, ttl)
end
#write(table, id, attributes, ttl) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 206

def write(table, id, attributes, ttl)
  queries = []
  # puts attributes
  attributes.each do |column, value|
    if !value.nil?
      query = "INSERT INTO #{table} (#{primary_key_column},column1,value) VALUES (?,?,?)"
      query += " USING TTL #{ttl.to_s}" if !ttl.nil?
      args = [id.to_s, column.to_s, value.to_s]
      queries << {query: query, arguments: args}
    else
      queries << {query: "DELETE FROM #{table} WHERE #{primary_key_column} = ? AND column1= ?", arguments: [id.to_s, column.to_s]}
    end
  end
  execute_batchable(queries)
end
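Because the table is a schemaless key/column1/value layout, one logical record expands into one statement per attribute; a hedged sketch where the table, id and attributes are illustrative:

# Two non-nil attributes become INSERTs, the nil attribute becomes a DELETE, and all three
# are sent through execute_batchable as one batch. Values are actually bound as ? placeholders;
# they are shown inline here only for readability.
adapter.write('events_by_user', 'a1', { 'name' => 'login', 'ip' => '10.0.0.1', 'obsolete' => nil }, 3600)
# => INSERT INTO events_by_user (key,column1,value) VALUES ('a1','name','login') USING TTL 3600
#    INSERT INTO events_by_user (key,column1,value) VALUES ('a1','ip','10.0.0.1') USING TTL 3600
#    DELETE FROM events_by_user WHERE key = 'a1' AND column1 = 'obsolete'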