Class: CassandraObject::Adapters::CassandraSchemalessAdapter
- Inherits: AbstractAdapter
  - Object
  - AbstractAdapter
  - CassandraObject::Adapters::CassandraSchemalessAdapter
- Defined in: lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb
Defined Under Namespace
Classes: QueryBuilder
Instance Attribute Summary
Attributes inherited from AbstractAdapter
Instance Method Summary
- #cassandra_cluster_options ⇒ Object
- #cassandra_version ⇒ Object
- #connection ⇒ Object
- #consistency ⇒ Object
- #consistency=(val) ⇒ Object
- #create_ids_where_clause(ids) ⇒ Object
-
#create_table(table_name, params = {}) ⇒ Object
SCHEMA.
- #delete(table, ids) ⇒ Object
- #drop_table(table_name, confirm = false) ⇒ Object
- #execute(statement, arguments = []) ⇒ Object
- #execute_async(queries, arguments = [], per_page = nil, next_cursor = nil) ⇒ Object
- #execute_batch(statements) ⇒ Object
- #insert(table, id, attributes, ttl = nil) ⇒ Object
- #pre_select(scope, per_page = nil, next_cursor = nil) ⇒ Object
- #primary_key_column ⇒ Object
- #schema_execute(cql, keyspace) ⇒ Object
- #select(scope) ⇒ Object
- #select_paginated(scope) ⇒ Object
- #statement_create_with_options(stmt, options) ⇒ Object
- #update(table, id, attributes, ttl = nil) ⇒ Object
- #write(table, id, attributes, ttl) ⇒ Object
Methods inherited from AbstractAdapter
#batch, #batching?, #execute_batchable, #initialize, #statement_with_options
Constructor Details
This class inherits a constructor from CassandraObject::Adapters::AbstractAdapter
Instance Method Details
#cassandra_cluster_options ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 64

def cassandra_cluster_options
  cluster_options = config.slice(*[
    :auth_provider,
    :client_cert,
    :compression,
    :compressor,
    :connect_timeout,
    :connections_per_local_node,
    :connections_per_remote_node,
    :consistency,
    :credentials,
    :futures_factory,
    :hosts,
    :load_balancing_policy,
    :logger,
    :page_size,
    :passphrase,
    :password,
    :port,
    :private_key,
    :protocol_version,
    :reconnection_policy,
    :retry_policy,
    :schema_refresh_delay,
    :schema_refresh_timeout,
    :server_cert,
    :ssl,
    :timeout,
    :trace,
    :username
  ])

  {
    load_balancing_policy: 'Cassandra::LoadBalancing::Policies::%s',
    reconnection_policy: 'Cassandra::Reconnection::Policies::%s',
    retry_policy: 'Cassandra::Retry::Policies::%s'
  }.each do |policy_key, class_template|
    if cluster_options[policy_key]
      cluster_options[policy_key] = (class_template % [cluster_options[policy_key].classify]).constantize
    end
  end

  # Setting defaults
  cluster_options.merge!({
    max_schema_agreement_wait: 1,
    consistency: cluster_options[:consistency] || :quorum,
    protocol_version: cluster_options[:protocol_version] || 3,
    page_size: cluster_options[:page_size] || 10000
  })
  return cluster_options
end
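A minimal usage sketch, assuming the constructor inherited from AbstractAdapter simply stores the config hash it is given (the host, keyspace, and policy values below are illustrative):

# Unrecognised keys are dropped by config.slice; policy names are classified
# into cassandra-driver policy classes and defaults are merged in.
adapter = CassandraObject::Adapters::CassandraSchemalessAdapter.new(
  hosts: ['127.0.0.1'],
  keyspace: 'my_keyspace',
  retry_policy: 'default'
)
adapter.cassandra_cluster_options
# => { hosts: ['127.0.0.1'],
#      retry_policy: Cassandra::Retry::Policies::Default,
#      max_schema_agreement_wait: 1, consistency: :quorum,
#      protocol_version: 3, page_size: 10000 }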
#cassandra_version ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 263

def cassandra_version
  @cassandra_version ||= execute('select release_version from system.local').rows.first['release_version'].to_f
end
#connection ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 118

def connection
  @connection ||= begin
    cluster = Cassandra.cluster cassandra_cluster_options
    cluster.connect config[:keyspace]
  end
end
#consistency ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 269

def consistency
  defined?(@consistency) ? @consistency : nil
end
#consistency=(val) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 273

def consistency=(val)
  @consistency = val
end
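The consistency accessors let callers override the consistency level passed with each statement. A sketch, assuming adapter is a connected instance of this class:

adapter.consistency                 # => nil until explicitly set (driver default applies)
adapter.consistency = :local_quorum
adapter.execute('select release_version from system.local')  # now issued at :local_quorum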
#create_ids_where_clause(ids) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 318

def create_ids_where_clause(ids)
  return ids if ids.empty?
  ids = ids.first if ids.is_a?(Array) && ids.one?
  sql = ids.is_a?(Array) ? "#{primary_key_column} IN (#{ids.map { |id| "'#{id}'" }.join(',')})" : "#{primary_key_column} = ?"
  return sql
end
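An illustration of the two shapes this helper returns (assuming adapter is an instance of this class; primary_key_column is always 'key'):

adapter.create_ids_where_clause(['a', 'b'])  # => "key IN ('a','b')"
adapter.create_ids_where_clause(['a'])       # => "key = ?"  (the single id is bound as an argument)
adapter.create_ids_where_clause([])          # => []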
#create_table(table_name, params = {}) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 237

def create_table(table_name, params = {})
  stmt = "CREATE TABLE #{table_name} (" +
    'key text,' +
    'column1 text,' +
    'value text,' +
    'PRIMARY KEY (key, column1)' +
    ')'
  # WITH COMPACT STORAGE
  schema_execute statement_create_with_options(stmt, params[:options]), config[:keyspace]
end
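For example, creating the wide-row table backing a model's column family might look like this (a sketch on a connected adapter; 'events_by_id' is a made-up table name):

adapter.create_table('events_by_id')
# Issues roughly:
#   CREATE TABLE events_by_id (key text,column1 text,value text,PRIMARY KEY (key, column1))
#   followed by the default WITH options chosen by #statement_create_with_options.
# Passing params[:options] instead routes the statement through the inherited
# #statement_with_options helper.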
#delete(table, ids) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 218

def delete(table, ids)
  ids = [ids] if !ids.is_a?(Array)
  arguments = nil
  arguments = ids if ids.size == 1
  statement = "DELETE FROM #{table} WHERE #{create_ids_where_clause(ids)}" #.gsub('?', ids.map { |id| "'#{id}'" }.join(','))
  execute(statement, arguments)
end
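Usage sketch (table name and ids are illustrative):

adapter.delete('events_by_id', 'abc')           # DELETE FROM events_by_id WHERE key = ?  (bound to 'abc')
adapter.delete('events_by_id', ['abc', 'def'])  # DELETE FROM events_by_id WHERE key IN ('abc','def')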
#drop_table(table_name, confirm = false) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 248

def drop_table(table_name, confirm = false)
  count = (schema_execute "SELECT count(*) FROM #{table_name}", config[:keyspace]).rows.first['count']
  if confirm || count == 0
    schema_execute "DROP TABLE #{table_name}", config[:keyspace]
  else
    raise "The table #{table_name} is not empty! If you want to drop it add the option confirm = true"
  end
end
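Usage sketch (table name is illustrative):

adapter.drop_table('events_by_id')        # raises if the table still contains rows
adapter.drop_table('events_by_id', true)  # drops the table regardless of row count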
#execute(statement, arguments = []) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 125

def execute(statement, arguments = [])
  ActiveSupport::Notifications.instrument('cql.cassandra_object', cql: statement) do
    connection.execute statement, arguments: arguments, consistency: consistency, page_size: config[:page_size]
  end
end
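A sketch of running a bound statement and observing the instrumentation it emits (query and table are illustrative):

rows = adapter.execute('SELECT value FROM events_by_id WHERE key = ?', ['abc'])
rows.each { |row| puts row['value'] }

# Every statement is also published via ActiveSupport::Notifications:
ActiveSupport::Notifications.subscribe('cql.cassandra_object') do |name, start, finish, id, payload|
  puts payload[:cql]
end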
#execute_async(queries, arguments = [], per_page = nil, next_cursor = nil) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 131

def execute_async(queries, arguments = [], per_page = nil, next_cursor = nil)
  per_page ||= config[:page_size]
  futures = queries.map { |q|
    ActiveSupport::Notifications.instrument('cql.cassandra_object', cql: q) do
      connection.execute_async q, arguments: arguments, consistency: consistency, page_size: per_page, paging_state: next_cursor
    end
  }
  futures.map do |future|
    rows = future.get
    rows
  end
end
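A sketch of issuing several statements concurrently and blocking on all of their futures (queries and per_page are illustrative):

results = adapter.execute_async(
  ['SELECT * FROM events_by_id WHERE key = ?'],
  ['abc'],
  100  # per_page; falls back to config[:page_size] when nil
)
results.each do |result|
  result.rows.each { |row| puts row }
end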
#execute_batch(statements) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 226

def execute_batch(statements)
  raise 'No can do' if statements.empty?
  batch = connection.batch do |b|
    statements.each do |statement|
      b.add(statement[:query], arguments: statement[:arguments])
    end
  end
  connection.execute(batch, page_size: config[:page_size])
end
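Each statement is a hash with :query and :arguments keys, matching what #write builds internally. A sketch (table, keys, and values are illustrative):

adapter.execute_batch([
  { query: 'INSERT INTO events_by_id (key,column1,value) VALUES (?,?,?)',
    arguments: ['abc', 'name', 'first event'] },
  { query: 'DELETE FROM events_by_id WHERE key = ? AND column1 = ?',
    arguments: ['abc', 'obsolete_column'] }
])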
#insert(table, id, attributes, ttl = nil) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 192

def insert(table, id, attributes, ttl = nil)
  write(table, id, attributes, ttl)
end
#pre_select(scope, per_page = nil, next_cursor = nil) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 144

def pre_select(scope, per_page = nil, next_cursor = nil)
  query = "SELECT DISTINCT #{primary_key_column} FROM #{scope.klass.column_family}"
  query << " LIMIT #{scope.limit_value}" if scope.limit_value == 1
  ids = []
  new_next_cursor = nil
  execute_async([query], nil, per_page, next_cursor).each do |item|
    item.rows.each { |x| ids << x[primary_key_column] }
    new_next_cursor = item.paging_state unless item.last_page?
  end
  return {ids: ids, new_next_cursor: new_next_cursor}
end
#primary_key_column ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 60

def primary_key_column
  'key'
end
#schema_execute(cql, keyspace) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 257

def schema_execute(cql, keyspace)
  schema_db = Cassandra.cluster cassandra_cluster_options
  connection = schema_db.connect keyspace
  connection.execute cql, consistency: consistency
end
#select(scope) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 156

def select(scope)
  queries = QueryBuilder.new(self, scope).to_query_async
  queries.compact! if queries.present?
  raise CassandraObject::RecordNotFound if !queries.present?
  arguments = scope.select_values.select{ |sv| sv != :column1 }.map(&:to_s)
  arguments += scope.where_values.select.each_with_index{ |_, i| i.odd? }.reject{ |c| c.empty? }.map(&:to_s)
  records = execute_async(queries, arguments).map do |item|
    # pagination
    elems = []
    loop do
      item.rows.each{ |x| elems << x }
      break if item.last_page?
      item = item.next_page
    end
    elems
  end
  {results: records.flatten!}
end
#select_paginated(scope) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 176

def select_paginated(scope)
  queries = QueryBuilder.new(self, scope).to_query_async
  queries.compact! if queries.present?
  raise CassandraObject::RecordNotFound if !queries.present?
  arguments = scope.select_values.select{ |sv| sv != :column1 }.map(&:to_s)
  arguments += scope.where_values.select.each_with_index{ |_, i| i.odd? }.reject{ |c| c.empty? }.map(&:to_s)
  new_next_cursor = nil
  records = []
  execute_async(queries, arguments, scope.limit_value, scope.next_cursor).each do |item|
    new_next_cursor = item.paging_state unless item.last_page?
    item.rows.each{ |x| records << x }
  end
  return {results: records, new_next_cursor: new_next_cursor}
end
#statement_create_with_options(stmt, options) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 277

def statement_create_with_options(stmt, options)
  if !options.nil?
    statement_with_options stmt, options
  else
    # standard
    if cassandra_version < 3
      "#{stmt} WITH COMPACT STORAGE AND bloom_filter_fp_chance = 0.001 AND CLUSTERING ORDER BY (column1 ASC) AND caching = '{\"keys\":\"ALL\", \"rows_per_partition\":\"NONE\"}' AND comment = '' AND compaction = {'min_sstable_size': '52428800', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'chunk_length_kb': '64', 'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.0 AND default_time_to_live = 0 AND gc_grace_seconds = 864000 AND max_index_interval = 2048 AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 1.0 AND speculative_retry = 'NONE';"
    else
      "#{stmt} WITH read_repair_chance = 0.0
        AND dclocal_read_repair_chance = 0.1
        AND gc_grace_seconds = 864000
        AND bloom_filter_fp_chance = 0.01
        AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' }
        AND comment = ''
        AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold' : 32, 'min_threshold' : 4 }
        AND compression = { 'chunk_length_in_kb' : 64, 'class' : 'org.apache.cassandra.io.compress.LZ4Compressor' }
        AND default_time_to_live = 0
        AND speculative_retry = '99PERCENTILE'
        AND min_index_interval = 128
        AND max_index_interval = 2048
        AND crc_check_chance = 1.0;
      "
    end
  end
end
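A sketch of what the default branch produces on an adapter connected to a Cassandra 3+ cluster (the table name is illustrative; with older clusters the COMPACT STORAGE variant is returned instead):

stmt = 'CREATE TABLE events_by_id (key text,column1 text,value text,PRIMARY KEY (key, column1))'
adapter.statement_create_with_options(stmt, nil)
# => "CREATE TABLE events_by_id (...) WITH read_repair_chance = 0.0\n AND dclocal_read_repair_chance = 0.1 ... AND crc_check_chance = 1.0;"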
#update(table, id, attributes, ttl = nil) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 196

def update(table, id, attributes, ttl = nil)
  write(table, id, attributes, ttl)
end
#write(table, id, attributes, ttl) ⇒ Object
# File 'lib/cassandra_object/adapters/cassandra_schemaless_adapter.rb', line 200

def write(table, id, attributes, ttl)
  queries = []
  # puts attributes
  attributes.each do |column, value|
    if !value.nil?
      query = "INSERT INTO #{table} (#{primary_key_column},column1,value) VALUES (?,?,?)"
      query += " USING TTL #{ttl.to_s}" if !ttl.nil?
      args = [id.to_s, column.to_s, value.to_s]
      queries << {query: query, arguments: args}
    else
      queries << {query: "DELETE FROM #{table} WHERE #{primary_key_column} = ? AND column1= ?", arguments: [id.to_s, column.to_s]}
    end
  end
  execute_batchable(queries)
end
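Each attribute becomes its own (key, column1, value) row, and nil attributes translate into per-column deletes, all submitted through the inherited #execute_batchable. A usage sketch (table, id, attributes, and TTL are illustrative):

adapter.write('events_by_id', 'abc', { 'name' => 'first event', 'obsolete' => nil }, 3600)
# Batches roughly:
#   INSERT INTO events_by_id (key,column1,value) VALUES ('abc','name','first event') USING TTL 3600
#   DELETE FROM events_by_id WHERE key = 'abc' AND column1= 'obsolete'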