Class: PgOnlineSchemaChange::Orchestrate

Inherits: Object
Extended by: Helper
Defined in: lib/pg_online_schema_change/orchestrate.rb

Constant Summary

SWAP_STATEMENT_TIMEOUT =
  "5s"
TRACK_PROGRESS_INTERVAL =
  60 # seconds

Class Method Summary

Methods included from Helper

logger, method_missing, primary_key, respond_to_missing?

Class Method Details

.copy_data! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 204

def copy_data!
  # re-uses transaction with serializable
  # Begin the process of copying data into the shadow table;
  # depending on the size of the table, this can be a
  # time-consuming operation.
  logger.info(
    "Clearing contents of audit table before copy..",
    { shadow_table: shadow_table, parent_table: client.table_name },
  )
  Query.run(client.connection, "DELETE FROM #{audit_table}", true)

  if client.copy_statement
    query = format(client.copy_statement, shadow_table: shadow_table)
    return Query.run(client.connection, query, true)
  end

  logger.info(
    "Copying contents..",
    { shadow_table: shadow_table, parent_table: client.table_name },
  )

  @copy_finished = false
  log_progress

  sql = Query.copy_data_statement(client, shadow_table, true)
  Query.run(client.connection, sql, true)
ensure
  Query.run(client.connection, "COMMIT;") # commit the serializable transaction
  @copy_finished = true
end
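
When a custom copy statement is configured, copy_data! treats it as a Ruby format string and fills in the %{shadow_table} placeholder via Kernel#format, as seen above. A minimal sketch of such a template, assuming a hypothetical books table and filter:

copy_statement = <<~SQL
  -- %{shadow_table} is replaced by copy_data! with the generated shadow table name
  INSERT INTO %{shadow_table}
  SELECT * FROM books
  WHERE purchased IS TRUE
SQL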

.drop_and_cleanup! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 339

def drop_and_cleanup!
  primary_drop = client.drop ? "DROP TABLE IF EXISTS #{old_primary_table};" : ""
  audit_table_drop = audit_table ? "DROP TABLE IF EXISTS #{audit_table}" : ""
  shadow_table_drop = shadow_table ? "DROP TABLE IF EXISTS #{shadow_table}" : ""

  sql = <<~SQL
    DROP TRIGGER IF EXISTS primary_to_audit_table_trigger ON #{client.table_name};
    #{audit_table_drop};
    #{shadow_table_drop};
    #{primary_drop}
    RESET statement_timeout;
    RESET client_min_messages;
    RESET lock_timeout;
  SQL

  Query.run(client.connection, sql)
end

.handle_signals! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 96

def handle_signals!
  reader = setup_signals!
  signal = reader.gets.chomp

  while !reader.closed? && reader.wait_readable # rubocop:disable Lint/UnreachableLoop
    logger.info("Signal #{signal} received, cleaning up")

    client.connection.cancel
    drop_and_cleanup!
    reader.close

    exit(Signal.list[signal])
  end
end

.log_progress ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 235

def log_progress
  new_connection = client.checkout_connection
  source_table_size = Query.get_table_size(new_connection, client.schema, client.table_name)

  Thread.new do
    loop do
      break if @copy_finished

      shadow_table_size = Query.get_table_size(new_connection, client.schema, shadow_table)
      progress = (shadow_table_size.to_f / source_table_size) * 100
      logger.info("Estimated copy progress: #{progress.round(2)}% complete")

      break if @copy_finished || progress >= 100
      sleep(TRACK_PROGRESS_INTERVAL) unless ENV["CI"]
    rescue StandardError => e
      logger.info("Reporting progress failed: #{e.message}")
    end
  end
end

.replace_views! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 324

def replace_views!
  view_definitions = Query.view_definitions_for(client, old_primary_table)
  view_definitions.each do |definition|
    definition.each do |view_name, view_definition|
      view_definition = view_definition.gsub(old_primary_table, client.table)

      logger.info("Replacing view #{view_name}")
      sql = <<~SQL
        CREATE OR REPLACE VIEW #{view_name} AS #{view_definition}
      SQL
      Query.run(client.connection, sql)
    end
  end
end

.replay_and_swap! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 255

def replay_and_swap!
  Replay.begin!
rescue CountBelowDelta
  logger.info("Remaining rows below delta count, proceeding towards swap")

  swap!
end

.run!(options) ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 56

def run!(options)
  setup!(options)
  Thread.new { handle_signals! }

  raise Error, "Parent table has no primary key, exiting..." if primary_key.nil?

  logger.info("Performing some house keeping....")
  run_analyze!
  run_vacuum!

  setup_audit_table!

  setup_trigger!
  setup_shadow_table! # re-uses transaction with serializable
  run_alter_statement! # re-uses transaction with serializable
  copy_data! # re-uses transaction with serializable
  run_analyze!
  replay_and_swap!
  run_analyze!
  validate_constraints!
  replace_views!
  drop_and_cleanup!

  logger.info("All tasks successfully completed")
rescue StandardError => e
  logger.fatal("Something went wrong: #{e.message}", { e: e })

  drop_and_cleanup!

  raise e
end

.run_alter_statement! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 191

def run_alter_statement!
  # re-uses transaction with serializable
  statement = Query.alter_statement_for(client, shadow_table)
  logger.info(
    "Running alter statement on shadow table",
    { shadow_table: shadow_table, parent_table: client.table_name },
  )
  Query.run(client.connection, statement, true)

  Store.set(:dropped_columns_list, Query.dropped_columns(client))
  Store.set(:renamed_columns_list, Query.renamed_columns(client))
end

.run_analyze! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 301

def run_analyze!
  logger.info("Performing ANALYZE!")

  client.connection.async_exec("ANALYZE VERBOSE #{client.schema}.#{client.table_name};")
end

.run_vacuum! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 307

def run_vacuum!
  logger.info("Performing VACUUM!")

  client.connection.async_exec("VACUUM VERBOSE #{client.schema}.#{client.table_name};")
end

.setup!(options) ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 13

def setup!(options)
  client = Store.set(:client, Client.new(options))

  sql = <<~SQL
    SET statement_timeout = 0;
    SET client_min_messages = warning;
    SET search_path TO #{client.schema};
  SQL

  Query.run(client.connection, sql)
  # install functions
  Query.run(client.connection, FUNC_FIX_SERIAL_SEQUENCE)
  Query.run(client.connection, FUNC_CREATE_TABLE_ALL)

  setup_store
end

.setup_audit_table! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 111

def setup_audit_table!
  logger.info("Setting up audit table", { audit_table: audit_table })

  sql = <<~SQL
    CREATE TABLE #{audit_table} (#{audit_table_pk} SERIAL PRIMARY KEY, #{operation_type_column} text, #{trigger_time_column} timestamp, LIKE #{client.table_name}) WITH (autovacuum_enabled = false);
  SQL

  Query.run(client.connection, sql)

  Store.set(
    :audit_table_pk_sequence,
    Query.get_sequence_name(client, audit_table, audit_table_pk),
  )
end

.setup_shadow_table! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 166

def setup_shadow_table!
  logger.info("Setting up shadow table", { shadow_table: shadow_table })
  Query.run(
    client.connection,
    "SELECT create_table_all('#{client.table_name}', '#{shadow_table}');",
  )

  # re-uses transaction with serializable
  # This ensures that all queries from here until copy_data! run with serializable
  # isolation. This is to ensure that once the trigger is added to the primary
  # table and contents are being copied into the shadow table, after a delete-all
  # on the audit table, any replaying of rows that happens next from the audit
  # table does not contain duplicates. We are ensuring there are no race
  # conditions between adding the trigger and the end of the copy, since they
  # all happen in the same serializable transaction.
  Query.run(client.connection, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE", true)

  # update serials
  Query.run(
    client.connection,
    "SELECT fix_serial_sequence('#{client.table_name}', '#{shadow_table}');",
    true,
  )
end

.setup_signals! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 88

def setup_signals!
  reader, writer = IO.pipe

  ['TERM', 'QUIT', 'INT'].each { |sig| trap(sig) { writer.puts sig } }

  reader
end
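
setup_signals! implements the writer half of the classic self-pipe pattern: the trap handler only writes the signal name to a pipe, and handle_signals! does the blocking cleanup work on an ordinary thread. A self-contained sketch of the same idea:

# Self-pipe pattern: keep signal handlers minimal, do real work elsewhere.
reader, writer = IO.pipe
trap("INT") { writer.puts "INT" }

Thread.new do
  sig = reader.gets.chomp # blocks until a trapped signal is written
  puts "Signal #{sig} received, cleaning up"
  exit(Signal.list[sig])  # e.g. INT => exit status 2
end.join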

.setup_store ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 30

def setup_store
  # Set these early on to ensure their creation and cleanup (even on
  # unexpected exits) always happen. IOW, calls to Store.get always
  # return the same value.
  Store.set(:old_primary_table, "pgosc_op_table_#{client.table.downcase}")
  Store.set(:audit_table, "pgosc_at_#{client.table.downcase}_#{pgosc_identifier}")
  Store.set(:operation_type_column, "operation_type_#{pgosc_identifier}")
  Store.set(:trigger_time_column, "trigger_time_#{pgosc_identifier}")
  Store.set(:audit_table_pk, "at_#{pgosc_identifier}_id")
  Store.set(:shadow_table, "pgosc_st_#{client.table.downcase}_#{pgosc_identifier}")
  Store.set(
    :primary_table_storage_parameters,
    Query.storage_parameters_for(client, client.table_name, true) || "",
  )

  Store.set(
    :referential_foreign_key_statements,
    Query.referential_foreign_keys_to_refresh(client, client.table_name),
  )
  Store.set(
    :self_foreign_key_statements,
    Query.self_foreign_keys_to_refresh(client, client.table_name),
  )
  Store.set(:trigger_statements, Query.get_triggers_for(client, client.table_name))
end
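
The names registered above follow a fixed scheme built from the table name and a per-run identifier. Illustrative values, assuming client.table is "books" and a hypothetical pgosc_identifier of "c0ffee":

# old_primary_table => "pgosc_op_table_books"
# audit_table       => "pgosc_at_books_c0ffee"
# shadow_table      => "pgosc_st_books_c0ffee"
# audit_table_pk    => "at_c0ffee_id"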

.setup_trigger! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 126

def setup_trigger!
  # Acquire an ACCESS EXCLUSIVE lock to ensure the audit triggers are
  # set up correctly. This also calls kill_backends (if opted in via flag),
  # so any competing backends are killed before the trigger is set up.
  opened = Query.open_lock_exclusive(client, client.table_name)

  raise AccessExclusiveLockNotAcquired unless opened

  logger.info("Setting up triggers")

  sql = <<~SQL
    DROP TRIGGER IF EXISTS primary_to_audit_table_trigger ON #{client.table_name};

    CREATE OR REPLACE FUNCTION primary_to_audit_table_trigger()
    RETURNS TRIGGER AS
    $$
    BEGIN
      IF ( TG_OP = 'INSERT') THEN
        INSERT INTO "#{audit_table}" select nextval('#{audit_table_pk_sequence}'), 'INSERT', clock_timestamp(), NEW.* ;
        RETURN NEW;
      ELSIF ( TG_OP = 'UPDATE') THEN
        INSERT INTO "#{audit_table}" select nextval('#{audit_table_pk_sequence}'), 'UPDATE', clock_timestamp(),  NEW.* ;
        RETURN NEW;
      ELSIF ( TG_OP = 'DELETE') THEN
        INSERT INTO "#{audit_table}" select nextval('#{audit_table_pk_sequence}'), 'DELETE', clock_timestamp(), OLD.* ;
        RETURN NEW;
      END IF;
    END;
    $$ LANGUAGE PLPGSQL SECURITY DEFINER;

    CREATE TRIGGER primary_to_audit_table_trigger
    AFTER INSERT OR UPDATE OR DELETE ON #{client.table_name}
    FOR EACH ROW EXECUTE PROCEDURE primary_to_audit_table_trigger();
  SQL

  Query.run(client.connection, sql, opened)
ensure
  Query.run(client.connection, "COMMIT;")
end

.swap! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 263

def swap!
  logger.info("Performing swap!")
  # From here on, all statements are carried out in a single
  # transaction with access exclusive lock

  opened = Query.open_lock_exclusive(client, client.table_name)

  raise AccessExclusiveLockNotAcquired unless opened

  Query.run(
    client.connection,
    "SET statement_timeout to '#{SWAP_STATEMENT_TIMEOUT}';",
    opened,
  )

  rows = Replay.rows_to_play(opened)
  Replay.play!(rows, opened)

  query_for_primary_key_refresh =
    Query.query_for_primary_key_refresh(shadow_table, primary_key, client.table_name, opened)

  sql = <<~SQL
    #{query_for_primary_key_refresh};
    ALTER TABLE #{client.table_name} RENAME to #{old_primary_table};
    ALTER TABLE #{shadow_table} RENAME to #{client.table_name};
    #{referential_foreign_key_statements}
    #{self_foreign_key_statements}
    #{trigger_statements}
    #{storage_params_reset}
    DROP TRIGGER IF EXISTS primary_to_audit_table_trigger ON #{client.table_name};
  SQL

  Query.run(client.connection, sql, opened)
ensure
  Query.run(client.connection, "COMMIT;")
  Query.run(client.connection, "SET statement_timeout = 0;")
end

.validate_constraints! ⇒ Object



# File 'lib/pg_online_schema_change/orchestrate.rb', line 313

def validate_constraints!
  return if client.skip_foreign_key_validation

  Query
    .get_foreign_keys_to_validate(client, client.table_name)
    .each do |statement|
      logger.info("Validating constraints!", statement: statement)
      Query.run(client.connection, statement)
    end
end
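
Each statement executed here is a Postgres constraint validation of the form below (table and constraint names hypothetical). VALIDATE CONSTRAINT takes only a SHARE UPDATE EXCLUSIVE lock on the table, which is why it can safely run after the swap without blocking reads or writes:

ALTER TABLE orders VALIDATE CONSTRAINT fk_orders_books;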