Module: Sequel::Spark::DatasetMethods

Includes:
UnmodifiedIdentifiers::DatasetMethods
Included in:
Hexspace::Dataset
Defined in:
lib/sequel/adapters/shared/spark.rb

Instance Method Summary collapse

Instance Method Details

#complex_expression_sql_append(sql, op, args) ⇒ Object



282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
# File 'lib/sequel/adapters/shared/spark.rb', line 282

# Render operators Spark SQL lacks via equivalent functions:
# bit shifts (<<, >>) become shiftleft/shiftright, and the regexp
# match operators become regexp() calls. The case-insensitive
# variants (~*, !~*) are emulated by lowercasing both operands.
def complex_expression_sql_append(sql, op, args)
  case op
  when :<<, :>>
    fn = (op == :<< ? :shiftleft : :shiftright)
    literal_append(sql, Sequel.function(fn, *args))
  when :~, :'!~'
    match = Sequel.function(:regexp, *args)
    literal_append(sql, op == :~ ? match : ~match)
  when :'~*', :'!~*'
    match = Sequel.function(:regexp, Sequel.function(:lower, args[0]), Sequel.function(:lower, args[1]))
    literal_append(sql, op == :'~*' ? match : ~match)
  else
    super
  end
end

#date_add_sql_append(sql, da) ⇒ Object



222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
# File 'lib/sequel/adapters/shared/spark.rb', line 222

# Append the SQL for a Sequel date_add expression, using Spark's
# make_ym_interval (years/months) and make_dt_interval
# (days/hours/minutes/seconds) functions. Weeks are folded into days,
# since Spark intervals have no week component.
#
# NOTE(review): the original also computed `da.cast_type || Time` but
# never used it — the result is not cast; confirm whether a cast to
# the requested type should be emitted.
def date_add_sql_append(sql, da)
  expr = da.expr

  # Default every interval component to 0, treating explicit nils as 0.
  h = Hash.new(0)
  da.interval.each do |k, v|
    h[k] = v || 0
  end

  # Fix: the original guard was `if h[:weeks]`, which is always true in
  # Ruby (0 is truthy); test the value like the other components.
  if h[:weeks] != 0
    h[:days] += h[:weeks] * 7
  end

  # Year/month offsets need a separate interval type in Spark.
  if h[:years] != 0 || h[:months] != 0
    expr = Sequel.+(expr, Sequel.function(:make_ym_interval, h[:years], h[:months]))
  end

  if h[:days] != 0 || h[:hours] != 0 || h[:minutes] != 0 || h[:seconds] != 0
    expr = Sequel.+(expr, Sequel.function(:make_dt_interval, h[:days], h[:hours], h[:minutes], h[:seconds]))
  end

  literal_append(sql, expr)
end

#delete ⇒ Object

Emulate delete by selecting all rows except the ones being deleted into a new table, drop the current table, and rename the new table to the current table name.

This is designed to minimize the changes to the tests, and is not recommended for production use.



252
253
254
# File 'lib/sequel/adapters/shared/spark.rb', line 252

# Emulate DELETE by rewriting the table without the deleted rows:
# per the adapter's documented approach, the surviving rows are
# selected into a new table, the current table is dropped, and the
# new table is renamed to the current table name (all handled by
# _with_temp_table). Not recommended for production use.
def delete
  _with_temp_table
end

#insert_supports_empty_values? ⇒ Boolean

Returns:

  • (Boolean)


313
314
315
# File 'lib/sequel/adapters/shared/spark.rb', line 313

# Sequel must not generate INSERT statements with an empty values
# list for Spark.
def insert_supports_empty_values?
  false
end

#literal_blob_append(sql, v) ⇒ Object



317
318
319
# File 'lib/sequel/adapters/shared/spark.rb', line 317

# Literalize a blob as a Spark to_binary() call whose payload is the
# base64 encoding of the bytes (newlines stripped from the encoding).
def literal_blob_append(sql, v)
  encoded = [v].pack("m*").gsub("\n", "")
  sql << "to_binary('" << encoded << "', 'base64')"
end

#literal_false ⇒ Object



321
322
323
# File 'lib/sequel/adapters/shared/spark.rb', line 321

# SQL literal used for boolean false.
def literal_false
  'false'
end

#literal_string_append(sql, v) ⇒ Object



325
326
327
# File 'lib/sequel/adapters/shared/spark.rb', line 325

# Append a single-quoted string literal, backslash-escaping any
# embedded single quotes and backslashes.
def literal_string_append(sql, v)
  escaped = v.gsub(/(['\\])/, '\\\\\1')
  sql << "'" << escaped << "'"
end

#literal_true ⇒ Object



329
330
331
# File 'lib/sequel/adapters/shared/spark.rb', line 329

# SQL literal used for boolean true.
def literal_true
  'true'
end

#multi_insert_sql_strategy ⇒ Object



301
302
303
# File 'lib/sequel/adapters/shared/spark.rb', line 301

# Multi-row inserts are emitted as a single INSERT ... VALUES with
# multiple rows.
def multi_insert_sql_strategy
  :values
end

#quoted_identifier_append(sql, name) ⇒ Object



305
306
307
# File 'lib/sequel/adapters/shared/spark.rb', line 305

# Quote an identifier with backticks, doubling any embedded backtick.
def quoted_identifier_append(sql, name)
  escaped = name.to_s.gsub('`', '``')
  sql << '`' << escaped << '`'
end

#requires_sql_standard_datetimes? ⇒ Boolean

Returns:

  • (Boolean)


309
310
311
# File 'lib/sequel/adapters/shared/spark.rb', line 309

# Use SQL-standard datetime literal syntax when literalizing
# date/time values.
def requires_sql_standard_datetimes?
  true
end

#supports_cte?(type = :select) ⇒ Boolean

Returns:

  • (Boolean)


333
334
335
# File 'lib/sequel/adapters/shared/spark.rb', line 333

# Common table expressions (WITH) are supported, but only for SELECT
# statements.
def supports_cte?(type=:select)
  :select == type
end

#supports_cte_in_subqueries? ⇒ Boolean

Returns:

  • (Boolean)


337
338
339
# File 'lib/sequel/adapters/shared/spark.rb', line 337

# WITH clauses are allowed inside subqueries.
def supports_cte_in_subqueries?
  true
end

#supports_group_cube? ⇒ Boolean

Returns:

  • (Boolean)


341
342
343
# File 'lib/sequel/adapters/shared/spark.rb', line 341

# GROUP BY CUBE is supported.
def supports_group_cube?
  true
end

#supports_group_rollup? ⇒ Boolean

Returns:

  • (Boolean)


345
346
347
# File 'lib/sequel/adapters/shared/spark.rb', line 345

# GROUP BY ROLLUP is supported.
def supports_group_rollup?
  true
end

#supports_grouping_sets? ⇒ Boolean

Returns:

  • (Boolean)


349
350
351
# File 'lib/sequel/adapters/shared/spark.rb', line 349

# GROUP BY GROUPING SETS is supported.
def supports_grouping_sets?
  true
end

#supports_regexp? ⇒ Boolean

Returns:

  • (Boolean)


353
354
355
# File 'lib/sequel/adapters/shared/spark.rb', line 353

# Regexp matching is supported (see complex_expression_sql_append for
# how the operators are rendered).
def supports_regexp?
  true
end

#supports_window_functions? ⇒ Boolean

Returns:

  • (Boolean)


357
358
359
# File 'lib/sequel/adapters/shared/spark.rb', line 357

# Window functions are supported.
def supports_window_functions?
  true
end

#update(columns) ⇒ Object



256
257
258
259
260
261
262
263
264
# File 'lib/sequel/adapters/shared/spark.rb', line 256

# Emulate UPDATE via the same temp-table rewrite used by #delete:
# rows are re-inserted into a new table with the given values
# substituted for the updated columns and every other column carried
# over unchanged.
def update(columns)
  changed_cols = columns.keys
  changed_vals = columns.values
  unchanged_cols = db.from(first_source_table).columns - changed_cols

  _with_temp_table do |tmp_name|
    db.from(tmp_name).insert([*changed_cols, *unchanged_cols], select(*changed_vals, *unchanged_cols))
  end
end

#with(name, dataset, opts = OPTS) ⇒ Object

Handle forward references in existing CTEs in the dataset by inserting this dataset before any dataset that would reference it.



363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
# File 'lib/sequel/adapters/shared/spark.rb', line 363

# Add a CTE to the dataset, handling forward references: if an
# existing CTE already refers to +name+, insert the new CTE before the
# first CTE that references it (instead of appending it at the end) so
# that the reference resolves.
def with(name, dataset, opts=OPTS)
  opts = Hash[opts].merge!(:name=>name, :dataset=>dataset).freeze
  # CTE names referenced by the dataset being added.
  references = ReferenceExtractor.references(dataset)

  if with = @opts[:with]
    with = with.dup
    # Map of literalized CTE name => the existing CTE dataset that
    # references that name.
    existing_references = @opts[:with_references]

    if referencing_dataset = existing_references[literal(name)]
      unless i = with.find_index{|o| o[:dataset].equal?(referencing_dataset)}
        raise Sequel::Error, "internal error finding referencing dataset"
      end

      # Place the new CTE immediately before the CTE that refers to it.
      with.insert(i, opts)

      # When not inserting the dataset at the end, if both the new
      # dataset and the dataset right after it refer to the same name,
      # keep the reference pointing at the new dataset, so that a later
      # CTE for that name is inserted before the new dataset.
      existing_references = existing_references.reject do |k, v|
        references[k] && v.equal?(referencing_dataset)
      end
    else
      with << opts
    end

    # Assume we will insert the dataset at the end, so existing references have priority
    references = references.merge(existing_references)
  else
    with = [opts]
  end

  clone(:with=>with.freeze, :with_references=>references.freeze)
end