Class: Tapsoob::Operation::Push

Inherits: Base (Object → Base → Tapsoob::Operation::Push)

Defined in: lib/tapsoob/operation/push.rb
Instance Attribute Summary
Attributes inherited from Base
#database_url, #dump_path, #opts
Instance Method Summary
Methods inherited from Base
#add_completed_table, #apply_table_filter, #can_use_pk_partitioning?, #catch_errors, #completed_tables, #completed_tables_mutex, #data?, #db, #default_chunksize, #exclude_tables, #exiting?, factory, #format_number, #indexes_first?, #initialize, #load_table_order, #log, #parallel_workers, #resuming?, #save_table_order, #schema?, #setup_signal_trap, #store_session, #stream_state, #stream_state=, #table_filter, #table_parallel_workers
Instance Method Details
#calculate_file_line_ranges(table_name, num_workers) ⇒ Object
Calculates the inclusive line ranges used to partition a table's data file across parallel workers.
```ruby
# File 'lib/tapsoob/operation/push.rb', line 347

def calculate_file_line_ranges(table_name, num_workers)
  file_path = File.join(dump_path, "data", "#{table_name}.json")
  return [] unless File.exist?(file_path)

  total_lines = File.foreach(file_path).count
  return [[0, total_lines - 1]] if total_lines == 0 || num_workers <= 1

  lines_per_worker = (total_lines.to_f / num_workers).ceil
  ranges = []
  (0...num_workers).each do |i|
    start_line = i * lines_per_worker
    end_line   = [((i + 1) * lines_per_worker) - 1, total_lines - 1].min
    ranges << [start_line, end_line] if start_line < total_lines
  end
  ranges
end
```
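As a quick worked example (not from the source): a data file with 10 lines split across 3 workers gives `lines_per_worker = (10 / 3.0).ceil = 4` and ranges `[[0, 3], [4, 7], [8, 9]]`, covering every line exactly once. The `start_line < total_lines` guard drops surplus workers: 6 workers over the same file yield only 5 ranges, since the sixth would start at line 10.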
#fetch_local_tables_info ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 311

def fetch_local_tables_info
  tables_with_counts = {}

  # Prefer the dependency order from the table_order manifest when present;
  # otherwise derive table names from the schema dump files.
  ordered_tables = load_table_order
  if ordered_tables
    table_names = ordered_tables
  else
    table_names = Dir.glob(File.join(dump_path, "schemas", "*"))
                     .map { |path| File.basename(path, ".rb") }
                     .reject { |name| name == "table_order" }
                     .sort
  end

  table_names.each do |table|
    if File.exist?(File.join(dump_path, "data", "#{table}.json"))
      # Each line of the data file is a JSON chunk; sum the chunk sizes.
      total_rows = 0
      File.readlines(File.join(dump_path, "data", "#{table}.json")).each do |line|
        chunk = JSON.parse(line.strip)
        total_rows += chunk["data"].size if chunk["data"]
      end
      tables_with_counts[table] = total_rows
    else
      tables_with_counts[table] = 0
    end
  end

  apply_table_filter(tables_with_counts)
end
```
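Each line of a table's data file is a self-contained JSON chunk, and the only key this method relies on is `"data"`, an array of rows. For instance (the other field names here are illustrative, not the library's exact format), a line such as `{"table_name": "users", "data": [[1, "alice"], [2, "bob"]]}` contributes `chunk["data"].size == 2` to the table's row count.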
#file_prefix ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 8

def file_prefix
  "push"
end
```
#local_tables_info ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 294

def local_tables_info
  opts[:local_tables_info] ||= fetch_local_tables_info
end
```
#parallel? ⇒ Boolean
Disables table-level parallelization for push operations to respect foreign key dependencies: tables must be loaded in dependency order (as recorded in the table_order.txt manifest) to avoid FK violations. Intra-table parallelization remains enabled and safe.
```ruby
# File 'lib/tapsoob/operation/push.rb', line 16

def parallel?
  false
end
```
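As a hypothetical example: if `orders.user_id` references `users.id`, the manifest must list `users` before `orders`; pushing both tables concurrently could insert an order whose user does not yet exist. The intra-table workers in #push_data_from_file_parallel remain safe because they all write rows of the same table.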
#push_data ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 107

def push_data
  log.info "Sending data"
  log.info "#{tables.size} tables, #{format_number(record_count)} records"

  Tapsoob::ProgressEvent.data_start(tables.size, record_count)

  # parallel? is false for Push (see above), so the serial path runs;
  # large tables may still fan out to intra-table workers.
  if parallel?
    push_data_parallel
  else
    push_data_serial
  end

  Tapsoob::ProgressEvent.data_complete(tables.size, record_count)
end
```
#push_data_from_file(stream, progress, total_records = nil) ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 227

def push_data_from_file(stream, progress, total_records = nil)
  records_processed = 0

  loop do
    if exiting?
      store_session
      exit 0
    end

    row_size = 0
    chunksize = stream.state[:chunksize]

    begin
      # d1/d2 (fetch and packaging time) are reported to the calculator as
      # idle seconds; the block returns the load's elapsed_time so the
      # calculator can adapt the next chunk size.
      chunksize = Tapsoob::Utils.calculate_chunksize(chunksize) do |c|
        stream.state[:chunksize] = c.to_i
        encoded_data, row_size, elapsed_time = nil

        d1 = c.time_delta do
          encoded_data, row_size, elapsed_time = stream.fetch({ :type => "file", :source => dump_path })
        end

        data = nil
        d2 = c.time_delta do
          data = {
            :state        => stream.to_hash,
            :checksum     => Tapsoob::Utils.checksum(encoded_data).to_s,
            :encoded_data => encoded_data
          }
        end

        stream.fetch_data_to_database(data)

        if row_size.positive?
          records_processed += row_size
          Tapsoob::ProgressEvent.table_progress(stream.table_name, records_processed, total_records) if total_records
        end

        self.stream_state = stream.to_hash
        c.idle_secs = (d1 + d2)

        elapsed_time
      end
    rescue Tapsoob::CorruptedData => e
      next
    rescue Tapsoob::DuplicatePrimaryKeyError => e
      stream.verify_stream
      stream = JSON.generate({ :state => stream.to_hash })
      next
    end

    stream.state[:chunksize] = chunksize
    progress.inc(1) if progress

    break if stream.complete?
  end

  progress.finish if progress
  add_completed_table(stream.table_name)
  self.stream_state = {}
  Tapsoob::ProgressEvent.table_complete(stream.table_name, records_processed)
end
```
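`Tapsoob::Utils.calculate_chunksize` itself is defined elsewhere in the library and not shown here. As a rough sketch of the general technique only (not the library's implementation), an adaptive chunk sizer grows the chunk while cycles stay fast and backs off when they run long:

```ruby
# Illustrative sketch only -- not Tapsoob::Utils.calculate_chunksize.
# Grows the chunk size while each fetch/load cycle beats a target time,
# and shrinks it (down to a floor) when a cycle runs long.
def adapt_chunksize(chunksize, elapsed_time, target_secs = 1.0)
  if elapsed_time < target_secs * 0.5
    chunksize * 2            # fast cycle: try bigger chunks
  elsif elapsed_time > target_secs
    [chunksize / 2, 10].max  # slow cycle: back off, keep a floor
  else
    chunksize                # within target: keep the current size
  end
end

adapt_chunksize(1000, 0.2)  #=> 2000
adapt_chunksize(1000, 3.0)  #=> 500
```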
#push_data_from_file_parallel(table_name, count, num_workers, parent_progress = nil) ⇒ Object
Performs a parallel push of a single large table by partitioning its data file across workers.
```ruby
# File 'lib/tapsoob/operation/push.rb', line 367

def push_data_from_file_parallel(table_name, count, num_workers, parent_progress = nil)
  ranges = calculate_file_line_ranges(table_name, num_workers)
  return if ranges.empty?

  db[table_name.to_sym].truncate if @opts[:purge]

  estimated_chunks = [(count.to_f / default_chunksize).ceil, 1].max
  shared_progress = parent_progress ? parent_progress.create_bar(table_name.to_s, estimated_chunks) : nil

  write_mutex = Mutex.new
  records_processed = 0

  begin
    workers = (0...num_workers).map do |worker_id|
      Thread.new do
        # Each worker reads only its assigned slice of the data file.
        start_line, end_line = ranges[worker_id]
        stream = Tapsoob::DataStream::FilePartition.new(db, {
          :table_name => table_name,
          :chunksize  => default_chunksize,
          :line_range => [start_line, end_line]
        }, {
          :"skip-duplicates"  => opts[:"skip-duplicates"] || false,
          :"discard-identity" => opts[:"discard-identity"] || false,
          :purge              => opts[:purge] || false,
          :debug              => opts[:debug]
        })

        loop do
          break if stream.complete?

          begin
            encoded_data, row_size, elapsed_time = stream.fetch(:type => "file", :source => dump_path)

            if row_size.positive?
              data = {
                :state        => stream.to_hash,
                :checksum     => Tapsoob::Utils.checksum(encoded_data).to_s,
                :encoded_data => encoded_data
              }

              # Database writes and the shared counter are serialized.
              write_mutex.synchronize do
                stream.fetch_data_to_database(data)
                records_processed += row_size
                Tapsoob::ProgressEvent.table_progress(table_name, records_processed, count)
              end

              shared_progress.inc(1) if shared_progress
            end
          rescue Tapsoob::CorruptedData => e
            log.info "Worker #{worker_id}: Corrupted Data Received #{e.message}, retrying..."
            next
          rescue StandardError => e
            log.error "Worker #{worker_id} error: #{e.message}"
            log.error e.backtrace.join("\n")
            raise
          end

          break if stream.complete?
        end
      end
    end

    workers.each(&:join)
    shared_progress.finish if shared_progress
  ensure
    Tapsoob::ProgressEvent.table_complete(table_name, records_processed)
  end
end
```
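Note the concurrency design here: workers parallelize file reading, JSON decoding, and checksumming, while `write_mutex` serializes both the database writes and the shared `records_processed` counter, so each table still receives one insert batch at a time.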
#push_data_parallel ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 164

def push_data_parallel
  log.info "Using #{parallel_workers} parallel workers for table-level parallelization"

  max_visible_bars = 8
  multi_progress = opts[:progress] ? Tapsoob::Progress::MultiBar.new(max_visible_bars) : nil

  # Queue only the tables that actually have data to push.
  table_queue = Queue.new
  tables.each do |table_name, count|
    data_file = File.join(dump_path, "data", "#{table_name}.json")
    next unless File.exist?(data_file) && count > 0
    table_queue << [table_name, count]
  end

  workers = (1..parallel_workers).map do
    Thread.new do
      loop do
        break if table_queue.empty?
        table_name, count = table_queue.pop(true) rescue break

        table_workers = table_parallel_workers(table_name, count)
        if table_workers > 1
          info_msg = "Table #{table_name}: using #{table_workers} workers for #{format_number(count)} records"
          if multi_progress
            multi_progress.set_info(info_msg)
          else
            log.info info_msg
          end
          Tapsoob::ProgressEvent.table_start(table_name, count, workers: table_workers)
          push_data_from_file_parallel(table_name, count, table_workers, multi_progress)
        else
          Tapsoob::ProgressEvent.table_start(table_name, count)
          db[table_name.to_sym].truncate if @opts[:purge]
          stream = Tapsoob::DataStream::Base.factory(db, {
            :table_name => table_name,
            :chunksize  => default_chunksize
          }, {
            :"skip-duplicates"  => opts[:"skip-duplicates"] || false,
            :"discard-identity" => opts[:"discard-identity"] || false,
            :purge              => opts[:purge] || false,
            :debug              => opts[:debug]
          })
          estimated_chunks = [(count.to_f / default_chunksize).ceil, 1].max
          progress = multi_progress ? multi_progress.create_bar(table_name.to_s, estimated_chunks) : nil
          push_data_from_file(stream, progress, count)
        end
      end
    end
  end

  workers.each(&:join)
  multi_progress.stop if multi_progress
end
```
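One Ruby detail worth noting: `Queue#pop(true)` is a non-blocking pop that raises `ThreadError` on an empty queue, so the inline `rescue break` cleanly exits a worker that loses the race between the `table_queue.empty?` check and the pop.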
#push_data_serial ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 122

def push_data_serial
  max_visible_bars = 8
  multi_progress = opts[:progress] ? Tapsoob::Progress::MultiBar.new(max_visible_bars) : nil

  # Tables are pushed one at a time, in the order returned by #tables;
  # a large table may still fan out to multiple intra-table workers.
  tables.each do |table_name, count|
    data_file = File.join(dump_path, "data", "#{table_name}.json")
    next unless File.exist?(data_file) && count > 0

    table_workers = table_parallel_workers(table_name, count)
    if table_workers > 1
      info_msg = "Table #{table_name}: using #{table_workers} workers for #{format_number(count)} records"
      multi_progress.set_info(info_msg) if multi_progress
      Tapsoob::ProgressEvent.table_start(table_name, count, workers: table_workers)
      push_data_from_file_parallel(table_name, count, table_workers, multi_progress)
    else
      info_msg = "Loading #{table_name}: #{format_number(count)} records"
      multi_progress.set_info(info_msg) if multi_progress
      Tapsoob::ProgressEvent.table_start(table_name, count)
      db[table_name.to_sym].truncate if @opts[:purge]
      stream = Tapsoob::DataStream::Base.factory(db, {
        :table_name => table_name,
        :chunksize  => default_chunksize
      }, {
        :"skip-duplicates"  => opts[:"skip-duplicates"] || false,
        :"discard-identity" => opts[:"discard-identity"] || false,
        :purge              => opts[:purge] || false,
        :debug              => opts[:debug]
      })
      estimated_chunks = [(count.to_f / default_chunksize).ceil, 1].max
      progress = multi_progress ? multi_progress.create_bar(table_name.to_s, estimated_chunks) : nil
      push_data_from_file(stream, progress, count)
    end
  end

  multi_progress.stop if multi_progress
end
```
#push_indexes ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 38

def push_indexes
  idxs = {}
  table_idxs = Dir.glob(File.join(dump_path, "indexes", "*.json")).map { |path| File.basename(path, '.json') }
  table_idxs.each do |table_idx|
    index_file = File.join(dump_path, "indexes", "#{table_idx}.json")
    idxs[table_idx] = File.readlines(index_file).map { |line| JSON.parse(line.strip) }
  end

  return unless idxs.size > 0

  log.info "Sending indexes"

  filtered_idxs = apply_table_filter(idxs).select { |table, indexes| indexes.size > 0 }
  Tapsoob::ProgressEvent.indexes_start(filtered_idxs.size)

  max_title_width = filtered_idxs.keys.map { |table| "#{table} indexes".length }.max || 14
  filtered_idxs.each do |table, indexes|
    progress = opts[:progress] ? Tapsoob::Progress::Bar.new("#{table} indexes", indexes.size, STDOUT, max_title_width) : nil
    indexes.each do |idx|
      migration_string = idx.is_a?(Array) ? idx.first : idx
      Tapsoob::Utils.load_indexes(database_url, migration_string)
      progress.inc(1) if progress
    end
    progress.finish if progress
  end

  Tapsoob::ProgressEvent.indexes_complete(filtered_idxs.size)
end
```
#push_partial_data ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 94

def push_partial_data
  return if stream_state == {}

  table_name = stream_state[:table_name]
  record_count = tables[table_name.to_s]
  log.info "Resuming #{table_name}, #{format_number(record_count)} records"

  stream = Tapsoob::DataStream::Base.factory(db, stream_state)
  chunksize = stream_state[:chunksize] || default_chunksize
  estimated_chunks = [(record_count.to_f / chunksize).ceil, 1].max
  progress = (opts[:progress] ? Tapsoob::Progress::Bar.new(table_name.to_s, estimated_chunks) : nil)
  push_data_from_file(stream, progress)
end
```
#push_reset_sequences ⇒ Object
#record_count ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 307

def record_count
  @record_count ||= local_tables_info.values.inject(0) { |a,c| a += c }
end
```
#run ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 24

def run
  catch_errors do
    unless resuming?
      push_schema if schema?
      push_indexes if indexes_first? && schema?
    end
    setup_signal_trap
    push_partial_data if data? && resuming?
    push_data if data?
    push_indexes if !indexes_first? && schema?
    push_reset_sequences
  end
end
```
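The `indexes_first?` switch decides whether indexes are created before or after the data load. Creating them afterwards is generally faster for bulk loads, since each index is built once instead of being updated row by row; creating them first keeps unique constraints enforced while data is loaded.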
#tables ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 298

def tables
  h = {}
  local_tables_info.each do |table_name, count|
    next if completed_tables.include?(table_name.to_s)
    h[table_name.to_s] = count
  end
  h
end
```
#to_hash ⇒ Object
```ruby
# File 'lib/tapsoob/operation/push.rb', line 20

def to_hash
  super.merge(:local_tables_info => local_tables_info)
end
```