Class: Dynamoid::AdapterPlugin::AwsSdkV2

Inherits:
Object
  • Object
show all
Defined in:
lib/dynamoid/adapter_plugin/aws_sdk_v2.rb

Overview

The AwsSdkV2 adapter provides support for the aws-sdk version 2 for ruby.

Defined Under Namespace

Classes: ItemUpdater, Table

Constant Summary collapse

EQ =
'EQ'.freeze
RANGE_MAP =
{
    range_greater_than: 'GT',
    range_less_than:    'LT',
    range_gte:          'GE',
    range_lte:          'LE',
    range_begins_with:  'BEGINS_WITH',
    range_between:      'BETWEEN',
    range_eq:           'EQ'
}
FIELD_MAP =

Don’t implement NULL and NOT_NULL because they don’t make sense — we declare the schema in models

{
    eq:           'EQ',
    gt:           'GT',
    lt:           'LT',
    gte:          'GE',
    lte:          'LE',
    begins_with:  'BEGINS_WITH',
    between:      'BETWEEN',
    in:           'IN',
    contains:     'CONTAINS',
    not_contains: 'NOT_CONTAINS'
}
HASH_KEY =
'HASH'.freeze
RANGE_KEY =
'RANGE'.freeze
STRING_TYPE =
'S'.freeze
NUM_TYPE =
'N'.freeze
BINARY_TYPE =
'B'.freeze
TABLE_STATUSES =
{
    creating: 'CREATING',
    updating: 'UPDATING',
    deleting: 'DELETING',
    active: 'ACTIVE'
}.freeze
PARSE_TABLE_STATUS =
->(resp, lookup = :table) {
  # lookup is table for describe_table API
  # lookup is table_description for create_table API
  #   because Amazon, damnit.
  resp.send(lookup).table_status
}
BATCH_WRITE_ITEM_REQUESTS_LIMIT =
25

Instance Attribute Summary collapse

Instance Method Summary collapse

Instance Attribute Details

#table_cacheObject (readonly)

Returns the value of attribute table_cache.



50
51
52
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 50

# Returns the value of attribute table_cache — the per-connection Hash of
# cached table descriptions (reset in #connect!, pruned in #delete_table).
def table_cache
  @table_cache
end

Instance Method Details

#batch_delete_item(options) ⇒ Object

Delete many items at once from DynamoDB. More efficient than delete each item individually.

or

Dynamoid::AdapterPlugin::AwsSdkV2.batch_delete_item('table1' => [['hk1', 'rk1'], ['hk1', 'rk2']])

See:

TODO handle rejections because of internal processing failures

Examples:

Delete IDs 1 and 2 from the table testtable

Dynamoid::AdapterPlugin::AwsSdkV2.batch_delete_item('table1' => ['1', '2'])

Parameters:

  • options (Hash)

    the hash of tables and IDs to delete



191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 191

# Deletes many items at once, slicing the keys into BatchWriteItem calls
# of at most BATCH_WRITE_ITEM_REQUESTS_LIMIT delete requests each.
#
# @param options [Hash] table name => array of ids (or [hash_key, range_key]
#   pairs for composite-key tables)
# @return [Array] one response object per BatchWriteItem request issued
# @raise [Dynamoid::Errors::ConditionalCheckFailedException]
def batch_delete_item(options)
  batched_requests = []

  options.each_pair do |table_name, ids|
    table = describe_table(table_name)

    ids.each_slice(BATCH_WRITE_ITEM_REQUESTS_LIMIT) do |ids_chunk|
      deletes = ids_chunk.map do |id|
        { delete_request: { key: key_stanza(table, *id) } }
      end
      batched_requests << { table_name => deletes }
    end
  end

  begin
    batched_requests.map do |request_items|
      client.batch_write_item(
        request_items: request_items,
        return_consumed_capacity: 'TOTAL',
        return_item_collection_metrics: 'SIZE')
    end
  rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException => e
    raise Dynamoid::Errors::ConditionalCheckFailedException, e
  end
end

#batch_get_item(table_ids, options = {}) ⇒ Hash

Get many items at once from DynamoDB. More efficient than getting each item individually.

@todo: Provide support for passing options to underlying batch_get_item docs.aws.amazon.com/sdkforruby/api/Aws/DynamoDB/Client.html#batch_get_item-instance_method

Examples:

Retrieve IDs 1 and 2 from the table testtable

Dynamoid::AdapterPlugin::AwsSdkV2.batch_get_item({'table1' => ['1', '2']})

Parameters:

  • table_ids (Hash)

    the hash of tables and IDs to retrieve

  • options (Hash) (defaults to: {})

    to be passed to underlying BatchGet call

Returns:

  • (Hash)

    a hash where keys are the table names and the values are the retrieved items

Since:

  • 1.0.0



135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 135

# Get many items at once from DynamoDB. More efficient than getting each
# item individually.
#
# NOTE(review): responses containing unprocessed_keys are not retried here,
# so throttled requests may silently return fewer rows — confirm whether
# callers tolerate this.
#
# @param table_ids [Hash] table name => ids (or [hash, range] pairs)
# @param options [Hash] currently unused by this implementation
# @return [Hash] table name => array of retrieved items
def batch_get_item(table_ids, options = {})
  request_items = Hash.new{|h, k| h[k] = []}
  # Nothing requested at all — return an empty (auto-vivifying) hash.
  return request_items if table_ids.all?{|k, v| v.blank?}

  ret = Hash.new([].freeze) # Default for tables where no rows are returned

  table_ids.each do |t, ids|
    next if ids.blank?
    tbl = describe_table(t)
    hk  = tbl.hash_key.to_s
    rng = tbl.range_key.to_s

    # One BatchGetItem call per slice of Dynamoid::Config.batch_size ids.
    Array(ids).each_slice(Dynamoid::Config.batch_size) do |ids|
      request_items = Hash.new{|h, k| h[k] = []}

      # Composite-key tables expect {hash, range} key pairs; otherwise
      # each id maps to the table's single hash key.
      keys = if rng.present?
        Array(ids).map do |h, r|
          { hk => h, rng => r }
        end
      else
        Array(ids).map do |id|
          { hk => id }
        end
      end

      request_items[t] = {
        keys: keys
      }

      results = client.batch_get_item(
        request_items: request_items
      )

      results.data[:responses].each do |table, rows|
        ret[table] += rows.collect { |r| result_item_to_hash(r) }
      end
    end
  end

  ret
end

#batch_write_item(table_name, objects, options = {}) ⇒ Object

Puts or deletes multiple items in one or more tables

See:

TODO handle rejections because of exceeding limit for the whole request - 16 MB, item size limit - 400 KB or because provisioned throughput is exceeded

Parameters:

  • table_name (String)

    the name of the table

  • items (Array)

    to be processed

  • additional (Hash)

    options



98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 98

# Writes multiple items into a single table, slicing them into
# BatchWriteItem calls of at most BATCH_WRITE_ITEM_REQUESTS_LIMIT puts.
#
# @param table_name [String] the name of the table
# @param objects [Array<Hash>] items to put
# @param options [Hash] extra options merged into each BatchWriteItem call
# @raise [Dynamoid::Errors::ConditionalCheckFailedException]
def batch_write_item(table_name, objects, options = {})
  put_batches = objects.each_slice(BATCH_WRITE_ITEM_REQUESTS_LIMIT).map do |chunk|
    chunk.map { |item| { put_request: { item: item } } }
  end

  begin
    put_batches.each do |request_items|
      client.batch_write_item(
        {
          request_items: {
            table_name => request_items,
          },
          return_consumed_capacity: 'TOTAL',
          return_item_collection_metrics: 'SIZE'
        }.merge!(options)
      )
    end
  rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException => e
    raise Dynamoid::Errors::ConditionalCheckFailedException, e
  end
end

#clientObject

Return the client object.

Since:

  • 1.0.0



82
83
84
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 82

# Return the client object — the Aws::DynamoDB::Client created by #connect!.
def client
  @client
end

#connect!Aws::DynamoDB::Client

Establish the connection to DynamoDB.

Returns:

  • (Aws::DynamoDB::Client)

    the DynamoDB connection



55
56
57
58
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 55

# Establish the connection to DynamoDB and reset the table schema cache
# (cached descriptions may belong to a previous connection).
#
# Fix: the documented return value is the client, but the original body
# assigned @table_cache last and therefore returned an empty Hash.
#
# @return [Aws::DynamoDB::Client] the DynamoDB connection
def connect!
  @table_cache = {}
  @client = Aws::DynamoDB::Client.new(connection_config)
end

#connection_configObject



60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 60

# Builds the option hash handed to Aws::DynamoDB::Client.new from
# Dynamoid::Config, including only the settings that are configured.
#
# @return [Hash] connection options (also stored in @connection_hash)
def connection_config
  @connection_hash = {}

  @connection_hash[:endpoint] = Dynamoid::Config.endpoint if Dynamoid::Config.endpoint?
  @connection_hash[:access_key_id] = Dynamoid::Config.access_key if Dynamoid::Config.access_key?
  @connection_hash[:secret_access_key] = Dynamoid::Config.secret_key if Dynamoid::Config.secret_key?
  @connection_hash[:region] = Dynamoid::Config.region if Dynamoid::Config.region?

  @connection_hash
end

#count(table_name) ⇒ Object



672
673
674
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 672

# Number of items in the table as reported by DescribeTable.
# The second argument presumably forces a fresh describe instead of the
# cached one — TODO confirm against describe_table's signature.
def count(table_name)
  describe_table(table_name, true).item_count
end

#create_table(table_name, key = :id, options = {}) ⇒ Object

Create a table on DynamoDB. This usually takes a long time to complete.

Parameters:

  • table_name (String)

    the name of the table to create

  • key (Symbol) (defaults to: :id)

    the table’s primary key (defaults to :id)

  • options (Hash) (defaults to: {})

    provide a range key here if the table has a composite key

Options Hash (options):

Since:

  • 1.0.0



228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 228

# Create a table on DynamoDB. This usually takes a long time to complete.
#
# @param table_name [String] the name of the table to create
# @param key [Symbol] the table's primary hash key (defaults to :id)
# @param options [Hash] :hash_key_type, :range_key, :read_capacity,
#   :write_capacity, :local_secondary_indexes, :global_secondary_indexes,
#   :sync (block until the table leaves the CREATING state)
# @return [Object, nil] the CreateTable response, or nil when the table
#   already exists (logged, not raised)
def create_table(table_name, key = :id, options = {})
  Dynamoid.logger.info "Creating #{table_name} table. This could take a while."
  read_capacity = options[:read_capacity] || Dynamoid::Config.read_capacity
  write_capacity = options[:write_capacity] || Dynamoid::Config.write_capacity

  secondary_indexes = options.slice(
    :local_secondary_indexes,
    :global_secondary_indexes
  )
  ls_indexes = options[:local_secondary_indexes]
  gs_indexes = options[:global_secondary_indexes]

  key_schema = {
    hash_key_schema: { key => (options[:hash_key_type] || :string) },
    range_key_schema: options[:range_key]
  }
  attribute_definitions = build_all_attribute_definitions(
    key_schema,
    secondary_indexes
  )
  key_schema = aws_key_schema(
    key_schema[:hash_key_schema],
    key_schema[:range_key_schema]
  )

  client_opts = {
    table_name: table_name,
    provisioned_throughput: {
      read_capacity_units: read_capacity,
      write_capacity_units: write_capacity
    },
    key_schema: key_schema,
    attribute_definitions: attribute_definitions
  }

  if ls_indexes.present?
    client_opts[:local_secondary_indexes] = ls_indexes.map do |index|
      index_to_aws_hash(index)
    end
  end

  if gs_indexes.present?
    client_opts[:global_secondary_indexes] = gs_indexes.map do |index|
      index_to_aws_hash(index)
    end
  end
  resp = client.create_table(client_opts)

  # Default to synchronous creation when any secondary index is declared,
  # but never override an explicit :sync choice made by the caller.
  # Fix: the original `!a && b || c` parsed as `(!a && b) || c` due to
  # operator precedence, so a present :global_secondary_indexes forced
  # sync even when the caller passed `sync: false`.
  options[:sync] = true if !options.has_key?(:sync) && (ls_indexes.present? || gs_indexes.present?)

  until_past_table_status(table_name, :creating) if options[:sync] &&
      (status = PARSE_TABLE_STATUS.call(resp, :table_description)) &&
      status == TABLE_STATUSES[:creating]
  # Response to original create_table, which, if options[:sync]
  #   may have an outdated table_description.table_status of "CREATING"
  resp
rescue Aws::DynamoDB::Errors::ResourceInUseException => e
  Dynamoid.logger.error "Table #{table_name} cannot be created as it already exists"
end

#create_table_synchronously(table_name, key = :id, options = {}) ⇒ Object

Create a table on DynamoDB synchronously. This usually takes a long time to complete. CreateTable is normally an asynchronous operation. You can optionally define secondary indexes on the new table as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially: only one table with secondary indexes can be in the CREATING state at any given time.

See: docs.aws.amazon.com/sdkforruby/api/Aws/DynamoDB/Client.html#create_table-instance_method

Parameters:

  • table_name (String)

    the name of the table to create

  • key (Symbol) (defaults to: :id)

    the table’s primary key (defaults to :id)

  • options (Hash) (defaults to: {})

    provide a range key here if the table has a composite key

Options Hash (options):

Since:

  • 1.2.0



304
305
306
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 304

# Create a table and wait for it to become usable — delegates to
# #create_table with sync: true forced on.
def create_table_synchronously(table_name, key = :id, options = {})
  create_table(table_name, key, options.merge(sync: true))
end

#delete_item(table_name, key, options = {}) ⇒ Object

Removes an item from DynamoDB.

@todo: Provide support for various options docs.aws.amazon.com/sdkforruby/api/Aws/DynamoDB/Client.html#delete_item-instance_method

Parameters:

  • table_name (String)

    the name of the table

  • key (String)

    the hash key of the item to delete

  • options (Hash) (defaults to: {})

    provide a range key here if the table has a composite key

Since:

  • 1.0.0



317
318
319
320
321
322
323
324
325
326
327
328
329
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 317

# Removes a single item from DynamoDB.
#
# @param table_name [String] the name of the table
# @param key [String] the hash key of the item to delete
# @param options [Hash] :range_key for composite keys, :conditions for a
#   conditional delete
# @raise [Dynamoid::Errors::ConditionalCheckFailedException] when the
#   delete conditions are not met
def delete_item(table_name, key, options = {})
  options ||= {}
  table = describe_table(table_name)

  client.delete_item(
    table_name: table_name,
    key: key_stanza(table, key, options[:range_key]),
    expected: expected_stanza(options[:conditions])
  )
rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException => e
  raise Dynamoid::Errors::ConditionalCheckFailedException, e
end

#delete_table(table_name, options = {}) ⇒ Object

Deletes an entire table from DynamoDB.

Parameters:

  • table_name (String)

    the name of the table to destroy

  • options (Hash) (defaults to: {})

    a customizable set of options

Options Hash (options):

  • sync (Boolean)

    Wait for table status check to raise ResourceNotFoundException

Since:

  • 1.0.0



337
338
339
340
341
342
343
344
345
346
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 337

# Deletes an entire table from DynamoDB and evicts it from the table
# cache.
#
# @param table_name [String] the name of the table to destroy
# @param options [Hash] :sync — wait until the table leaves the
#   DELETING state before returning
# @raise [Aws::DynamoDB::Errors::ResourceInUseException] when the table
#   cannot be deleted because it is in use (logged, then re-raised)
def delete_table(table_name, options = {})
  response = client.delete_table(table_name: table_name)

  if options[:sync]
    status = PARSE_TABLE_STATUS.call(response, :table_description)
    until_past_table_status(table_name, :deleting) if status == TABLE_STATUSES[:deleting]
  end

  table_cache.delete(table_name)
rescue Aws::DynamoDB::Errors::ResourceInUseException => e
  Dynamoid.logger.error "Table #{table_name} cannot be deleted as it is in use"
  raise e
end

#delete_table_synchronously(table_name, options = {}) ⇒ Object



348
349
350
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 348

# Delete a table and wait for the deletion to finish — delegates to
# #delete_table with sync: true forced on.
def delete_table_synchronously(table_name, options = {})
  delete_table(table_name, options.merge(sync: true))
end

#get_item(table_name, key, options = {}) ⇒ Hash

Fetches an item from DynamoDB.

Parameters:

  • table_name (String)

    the name of the table

  • key (String)

    the hash key of the item to find

  • options (Hash) (defaults to: {})

    provide a range key here if the table has a composite key

Returns:

  • (Hash)

    a hash representing the raw item in DynamoDB

Since:

  • 1.0.0



365
366
367
368
369
370
371
372
373
374
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 365

# Fetches a single item from DynamoDB.
#
# @param table_name [String] the name of the table
# @param key [String] the hash key of the item to find
# @param options [Hash] :range_key for composite keys (removed from the
#   hash, matching the original's destructive delete)
# @return [Hash, nil] the raw item, or nil when not found
def get_item(table_name, key, options = {})
  options ||= {}
  range_key = options.delete(:range_key)
  table = describe_table(table_name)

  raw_item = client.get_item(
    table_name: table_name,
    key: key_stanza(table, key, range_key)
  )[:item]

  raw_item && result_item_to_hash(raw_item)
end

#list_tablesObject

List all tables on DynamoDB.

Since:

  • 1.0.0



411
412
413
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 411

# List all tables on DynamoDB.
#
# Fix: a single ListTables call returns at most one page of table names
# (with a last_evaluated_table_name cursor); the original returned only
# the first page, silently truncating accounts with many tables. This
# version follows the pagination cursor.
#
# @return [Array<String>] every table name
def list_tables
  table_names = []
  start_name = nil

  loop do
    resp = if start_name
      client.list_tables(exclusive_start_table_name: start_name)
    else
      client.list_tables
    end
    table_names.concat(resp[:table_names])
    start_name = resp[:last_evaluated_table_name]
    break unless start_name
  end

  table_names
end

#put_item(table_name, object, options = {}) ⇒ Object

Parameters:

  • table_name (String)

    the name of the table

  • object (Object)

    a hash or Dynamoid object to persist

Since:

  • 1.0.0



423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 423

# Persists an item, skipping nil values and empty strings/sets (DynamoDB
# rejects empty attribute values).
#
# @param table_name [String] the name of the table
# @param object [Object] a hash or Dynamoid object to persist
# @param options [Hash] extra options merged into the PutItem call; also
#   consulted by expected_stanza for conditional puts
# @raise [Dynamoid::Errors::ConditionalCheckFailedException]
def put_item(table_name, object, options = {})
  options ||= {}

  item = object.each_with_object({}) do |(attr, value), memo|
    skippable = value.nil? || ((value.is_a?(Set) || value.is_a?(String)) && value.empty?)
    memo[attr.to_s] = value unless skippable
  end

  begin
    client.put_item(
      {
        table_name: table_name,
        item: item,
        expected: expected_stanza(options)
      }.merge!(options)
    )
  rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException => e
    raise Dynamoid::Errors::ConditionalCheckFailedException, e
  end
end

#query(table_name, opts = {}) ⇒ Enumerable

Query the DynamoDB table. This employs DynamoDB’s indexes so is generally faster than scanning, but is only really useful for range queries, since it can only find by one hash key at once. Only provide one range key to the hash.

Parameters:

  • table_name (String)

    the name of the table

  • opts (Hash) (defaults to: {})

    the options to query the table with

Options Hash (opts):

  • :hash_value (String)

    the value of the hash key to find

  • :range_between (Number, Number)

    find the range key within this range

  • :range_greater_than (Number)

    find range keys greater than this

  • :range_less_than (Number)

    find range keys less than this

  • :range_gte (Number)

    find range keys greater than or equal to this

  • :range_lte (Number)

    find range keys less than or equal to this

Returns:

  • (Enumerable)

    matching items

Since:

  • 1.0.0



463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 463

# Query the DynamoDB table. Uses the Query API (an index lookup), so it is
# generally faster than a scan, but requires a hash key value and accepts
# at most one range-key condition.
#
# @param table_name [String] the name of the table
# @param opts [Hash] :hash_value (required), optional :hash_key /
#   :range_key name overrides, one RANGE_MAP condition, FIELD_MAP query
#   filters, :record_limit / :scan_limit / :batch_size, :next_token, plus
#   :consistent_read, :scan_index_forward, :select, :index_name
# @return [Enumerable] lazily-evaluated matching items
def query(table_name, opts = {})
  table = describe_table(table_name)
  hk    = (opts[:hash_key].present? ? opts.delete(:hash_key) : table.hash_key).to_s
  rng   = (opts[:range_key].present? ? opts.delete(:range_key) : table.range_key).to_s
  q     = opts.slice(
            :consistent_read,
            :scan_index_forward,
            :select,
            :index_name
          )

  # slice copies rather than removes, so strip the pass-through options
  # before the remaining opts are interpreted as conditions below.
  opts.delete(:consistent_read)
  opts.delete(:scan_index_forward)
  opts.delete(:select)
  opts.delete(:index_name)

  # Deal with various limits and batching
  record_limit = opts.delete(:record_limit)
  scan_limit = opts.delete(:scan_limit)
  batch_size = opts.delete(:batch_size)
  limit = [record_limit, scan_limit, batch_size].compact.min
  q[:limit] = limit if limit

  opts.delete(:next_token).tap do |token|
    break unless token
    q[:exclusive_start_key] = {
      hk  => token[:hash_key_element],
      rng => token[:range_key_element]
    }
    # For secondary indices the start key must contain the indices composite key
    # but also the table's composite keys
    q[:exclusive_start_key][table.hash_key] = token[:table_hash_key_element] if token[:table_hash_key_element]
    q[:exclusive_start_key][table.range_key] = token[:table_range_key_element] if token[:table_range_key_element]
  end

  key_conditions = {
    hk => {
      # TODO: Provide option for other operators like NE, IN, LE, etc
      comparison_operator: EQ,
      attribute_value_list: [
        opts.delete(:hash_value).freeze
      ]
    }
  }

  # Any remaining RANGE_MAP keys become the range-key condition.
  opts.each_pair do |k, v|
    # TODO: ATM, only few comparison operators are supported, provide support for all operators
    next unless(op = RANGE_MAP[k])
    key_conditions[rng] = {
      comparison_operator: op,
      attribute_value_list: [
        opts.delete(k).freeze
      ].flatten # Flatten as BETWEEN operator specifies array of two elements
    }
  end

  # Whatever is left in opts is treated as {attr => {field_op => value}}
  # post-read query filters.
  query_filter = {}
  opts.reject {|k, _| k.in? RANGE_MAP.keys}.each do |attr, hash|
    query_filter[attr] = {
      comparison_operator: FIELD_MAP[hash.keys[0]],
      attribute_value_list: [
        hash.values[0].freeze
      ].flatten # Flatten as BETWEEN operator specifies array of two elements
    }
  end

  q[:table_name]     = table_name
  q[:key_conditions] = key_conditions
  q[:query_filter]   = query_filter

  Enumerator.new { |y|
    record_count = 0
    scan_count = 0
    loop do
      # Adjust the limit down if the remaining record and/or scan limit are
      # lower to obey limits. We can assume the difference won't be
      # negative due to break statements below but choose smaller limit
      # which is why we have 2 separate if statements.
      # NOTE: Adjusting based on record_limit can cause many HTTP requests
      # being made. We may want to change this behavior, but it affects
      # filtering on data with potentially large gaps.
      # Example:
      #    User.where('created_at.gte' => 1.day.ago).record_limit(1000)
      #    Records 1-999: Users that fit criteria
      #    Records 1000-2000: Users that do not fit criteria
      #    Record 2001 fits criteria
      # The underlying implementation will have 1 page for records 1-999
      # then will request with limit 1 for records 1000-2000 (making 1000
      # requests of limit 1) until it hits record 2001.
      if q[:limit] && record_limit && record_limit - record_count < q[:limit]
        q[:limit] = record_limit - record_count
      end
      if q[:limit] && scan_limit && scan_limit - scan_count < q[:limit]
        q[:limit] = scan_limit - scan_count
      end

      results = client.query(q)
      results.items.each { |row| y << result_item_to_hash(row) }

      record_count += results.items.size
      break if record_limit && record_count >= record_limit

      scan_count += results.scanned_count
      break if scan_limit && scan_count >= scan_limit

      # Keep paging until DynamoDB stops returning a continuation key.
      if(lk = results.last_evaluated_key)
        q[:exclusive_start_key] = lk
      else
        break
      end
    end
  }
end

#scan(table_name, scan_hash = {}, select_opts = {}) ⇒ Enumerable

Scan the DynamoDB table. This is usually a very slow operation as it naively filters all data on the DynamoDB servers.

@todo: Provide support for various options docs.aws.amazon.com/sdkforruby/api/Aws/DynamoDB/Client.html#scan-instance_method

Parameters:

  • table_name (String)

    the name of the table

  • scan_hash (Hash) (defaults to: {})

    a hash of attributes: matching records will be returned by the scan

Returns:

  • (Enumerable)

    matching items

Since:

  • 1.0.0



588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 588

# Scan the DynamoDB table. This is usually a very slow operation as it
# naively filters all data on the DynamoDB servers.
#
# @param table_name [String] the name of the table
# @param scan_hash [Hash] attribute => {field_op => value} filter
#   conditions; matching records will be returned by the scan
# @param select_opts [Hash] :consistent_read, :record_limit, :scan_limit,
#   :batch_size (all destructively removed from the hash)
# @return [Enumerable] lazily-evaluated matching items
def scan(table_name, scan_hash = {}, select_opts = {})
  request = { table_name: table_name }
  request[:consistent_read] = true if select_opts.delete(:consistent_read)

  # Deal with various limits and batching
  record_limit = select_opts.delete(:record_limit)
  scan_limit = select_opts.delete(:scan_limit)
  batch_size = select_opts.delete(:batch_size)
  request_limit = [record_limit, scan_limit, batch_size].compact.min
  request[:limit] = request_limit if request_limit

  if scan_hash.present?
    request[:scan_filter] = scan_hash.reduce({}) do |memo, (attr, cond)|
      # Flatten as BETWEEN operator specifies array of two elements
      memo.merge(attr.to_s => {
        comparison_operator: FIELD_MAP[cond.keys[0]],
        attribute_value_list: [cond.values[0].freeze].flatten
      })
    end
  end

  Enumerator.new do |y|
    record_count = 0
    scan_count = 0
    loop do
      # Adjust the limit down if the remaining record and/or scan limit are
      # lower to obey limits. We can assume the difference won't be
      # negative due to break statements below but choose smaller limit
      # which is why we have 2 separate if statements.
      # NOTE: Adjusting based on record_limit can cause many HTTP requests
      # being made. We may want to change this behavior, but it affects
      # filtering on data with potentially large gaps.
      # Example:
      #    User.where('created_at.gte' => 1.day.ago).record_limit(1000)
      #    Records 1-999: Users that fit criteria
      #    Records 1000-2000: Users that do not fit criteria
      #    Record 2001 fits criteria
      # The underlying implementation will have 1 page for records 1-999
      # then will request with limit 1 for records 1000-2000 (making 1000
      # requests of limit 1) until it hits record 2001.
      if request[:limit] && record_limit && record_limit - record_count < request[:limit]
        request[:limit] = record_limit - record_count
      end
      if request[:limit] && scan_limit && scan_limit - scan_count < request[:limit]
        request[:limit] = scan_limit - scan_count
      end

      results = client.scan(request)
      results.items.each { |row| y << result_item_to_hash(row) }

      record_count += results.items.size
      break if record_limit && record_count >= record_limit

      scan_count += results.scanned_count
      break if scan_limit && scan_count >= scan_limit

      # Keep pulling if we haven't finished paging in all data
      if(lk = results[:last_evaluated_key])
        request[:exclusive_start_key] = lk
      else
        break
      end
    end
  end
end

#truncate(table_name) ⇒ Object

Truncates all records in the given table

Parameters:

  • table_name (String)

    the name of the table

Since:

  • 1.0.0



660
661
662
663
664
665
666
667
668
669
670
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 660

# Truncates all records in the given table by scanning it and deleting
# each item individually (DynamoDB has no native truncate).
#
# @param table_name [String] the name of the table
def truncate(table_name)
  table     = describe_table(table_name)
  hash_key  = table.hash_key
  range_key = table.range_key

  scan(table_name, {}, {}).each do |attributes|
    delete_opts = {}
    delete_opts[:range_key] = attributes[range_key.to_sym] if range_key
    delete_item(table_name, attributes[hash_key], delete_opts)
  end
end

#update_item(table_name, key, options = {}) {|iu = ItemUpdater.new(table, key, range_key)| ... } ⇒ Object

Edits an existing item’s attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values

Parameters:

  • table_name (String)

    the name of the table

  • key (String)

    the hash key of the item to find

  • options (Hash) (defaults to: {})

    provide a range key here if the table has a composite key

Yields:

Returns:

  • new attributes for the record



385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
# File 'lib/dynamoid/adapter_plugin/aws_sdk_v2.rb', line 385

# Edits an existing item's attributes, or adds a new item to the table if
# it does not already exist. Yields an ItemUpdater that the block uses to
# put, delete, or add attribute values.
#
# @param table_name [String] the name of the table
# @param key [String] the hash key of the item to find
# @param options [Hash] :range_key for composite keys, :conditions for a
#   conditional update (both destructively removed; any leftovers raise)
# @yield [ItemUpdater] collects the attribute updates to apply
# @return [Hash] the record's new attributes (ALL_NEW)
# @raise [Dynamoid::Errors::ConditionalCheckFailedException]
def update_item(table_name, key, options = {})
  range_key  = options.delete(:range_key)
  conditions = options.delete(:conditions)
  table      = describe_table(table_name)

  updater = ItemUpdater.new(table, key, range_key)
  yield updater

  # Anything left over is an unsupported option — fail loudly.
  raise "non-empty options: #{options}" unless options.empty?

  begin
    response = client.update_item(table_name: table_name,
      key: key_stanza(table, key, range_key),
      attribute_updates: updater.to_h,
      expected: expected_stanza(conditions),
      return_values: 'ALL_NEW'
    )
    result_item_to_hash(response[:attributes])
  rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException => e
    raise Dynamoid::Errors::ConditionalCheckFailedException, e
  end
end