Class: PgOnlineSchemaChange::Orchestrate
- Inherits: Object
- Hierarchy: Object → PgOnlineSchemaChange::Orchestrate
- Extended by:
- Helper
- Defined in:
- lib/pg_online_schema_change/orchestrate.rb
Constant Summary collapse
- SWAP_STATEMENT_TIMEOUT =
"5s"
Class Method Summary collapse
- .copy_data! ⇒ Object
- .disable_vacuum! ⇒ Object
- .drop_and_cleanup! ⇒ Object
- .handle_signals! ⇒ Object
- .replay_and_swap! ⇒ Object
- .run!(options) ⇒ Object
- .run_alter_statement! ⇒ Object
- .run_analyze! ⇒ Object
- .setup!(options) ⇒ Object
- .setup_audit_table! ⇒ Object
- .setup_shadow_table! ⇒ Object
- .setup_signals! ⇒ Object
- .setup_store ⇒ Object
- .setup_trigger! ⇒ Object
- .swap! ⇒ Object
- .validate_constraints! ⇒ Object
Methods included from Helper
logger, method_missing, primary_key, respond_to_missing?
Class Method Details
.copy_data! ⇒ Object
196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 196

def copy_data! # re-uses transaction with serializable
  # Bulk-copy every row from the parent table into the shadow table.
  # Depending on table size this can be a long-running operation. The
  # audit table is emptied first so the subsequent replay starts clean.
  logger.info("Clearing contents of audit table before copy..", { shadow_table: shadow_table, parent_table: client.table })
  Query.run(client.connection, "DELETE FROM #{audit_table}", true)

  logger.info("Copying contents..", { shadow_table: shadow_table, parent_table: client.table })

  # Honour a user-supplied copy statement when present; otherwise build
  # the default INSERT..SELECT via Query.
  copy_sql =
    if client.copy_statement
      format(client.copy_statement, shadow_table: shadow_table)
    else
      Query.copy_data_statement(client, shadow_table, true)
    end

  Query.run(client.connection, copy_sql, true)
ensure
  Query.run(client.connection, "COMMIT;") # commit the serializable transaction
end
.disable_vacuum! ⇒ Object
165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 165

def disable_vacuum! # re-uses transaction with serializable
  # Remember the primary table's storage parameters (restored later in
  # swap!), then switch autovacuum off on the shadow and audit tables so
  # vacuum cannot interfere while the migration is in flight.
  storage_params = Query.storage_parameters_for(client, client.table, true) || ""
  Store.set(:primary_table_storage_parameters, storage_params)

  logger.debug("Disabling vacuum on shadow and audit table", { shadow_table: shadow_table, audit_table: audit_table })

  disable_sql = <<~SQL
    ALTER TABLE #{shadow_table} SET (
      autovacuum_enabled = false, toast.autovacuum_enabled = false
    );

    ALTER TABLE #{audit_table} SET (
      autovacuum_enabled = false, toast.autovacuum_enabled = false
    );
  SQL

  Query.run(client.connection, disable_sql, true)
end
.drop_and_cleanup! ⇒ Object
270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 270

def drop_and_cleanup!
  # Tear down everything this run created: the audit trigger, the audit
  # and shadow tables, and (only when --drop was requested) the renamed
  # old primary table. Session settings are reset as the final step.
  drop_old_primary = client.drop ? "DROP TABLE IF EXISTS #{old_primary_table};" : ""
  drop_audit = audit_table ? "DROP TABLE IF EXISTS #{audit_table}" : ""
  drop_shadow = shadow_table ? "DROP TABLE IF EXISTS #{shadow_table}" : ""

  cleanup_sql = <<~SQL
    DROP TRIGGER IF EXISTS primary_to_audit_table_trigger ON #{client.table};
    #{drop_audit};
    #{drop_shadow};
    #{drop_old_primary}
    RESET statement_timeout;
    RESET client_min_messages;
    RESET lock_timeout;
  SQL

  Query.run(client.connection, cleanup_sql)
end
.handle_signals! ⇒ Object
82 83 84 85 86 87 88 89 90 91 92 93 94 95 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 82 def handle_signals! reader = setup_signals! signal = reader.gets.chomp while !reader.closed? && IO.select([reader]) # rubocop:disable Lint/UnreachableLoop logger.info "Signal #{signal} received, cleaning up" client.connection.cancel drop_and_cleanup! reader.close exit Signal.list[signal] end end |
.replay_and_swap! ⇒ Object
216 217 218 219 220 221 222 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 216

# Replays captured writes from the audit table onto the shadow table.
# Replay.begin! raises CountBelowDelta once the remaining backlog is
# small enough, at which point we proceed to the final swap.
def replay_and_swap!
  begin
    Replay.begin!
  rescue CountBelowDelta
    logger.info("Remaining rows below delta count, proceeding towards swap")
    swap!
  end
end
.run!(options) ⇒ Object
45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 45

# Entry point: orchestrates the whole online schema change, from session
# setup through copy, replay, swap and cleanup. On any StandardError the
# helper objects are dropped before the error is re-raised.
#
# NOTE: the rendered source had dropped tokens — the signature takes
# +options+ (see the method summary ".run!(options)"), which must be
# forwarded to setup!, and the fatal log interpolates +e.message+.
#
# @param options [Object] parsed CLI options, forwarded to setup!
# @raise [Error] when the parent table has no primary key
def run!(options)
  setup!(options)
  # Handle TERM/QUIT/INT on a background thread so cleanup still runs.
  Thread.new { handle_signals! }

  raise Error, "Parent table has no primary key, exiting..." if primary_key.nil?

  setup_audit_table!
  setup_trigger!
  setup_shadow_table! # re-uses transaction with serializable
  disable_vacuum! # re-uses transaction with serializable
  run_alter_statement! # re-uses transaction with serializable
  copy_data! # re-uses transaction with serializable
  run_analyze!
  replay_and_swap!
  run_analyze!
  validate_constraints!
  drop_and_cleanup!

  logger.info("All tasks successfully completed")
rescue StandardError => e
  logger.fatal("Something went wrong: #{e.message}", { e: e })

  drop_and_cleanup!

  raise e
end
.run_alter_statement! ⇒ Object
185 186 187 188 189 190 191 192 193 194 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 185

def run_alter_statement! # re-uses transaction with serializable
  # Apply the user's ALTER statement against the shadow table, then
  # record which columns were dropped/renamed so replay can remap rows
  # coming out of the audit table.
  alter_sql = Query.alter_statement_for(client, shadow_table)
  logger.info("Running alter statement on shadow table", { shadow_table: shadow_table, parent_table: client.table })
  Query.run(client.connection, alter_sql, true)

  Store.set(:dropped_columns_list, Query.dropped_columns(client))
  Store.set(:renamed_columns_list, Query.renamed_columns(client))
end
.run_analyze! ⇒ Object
256 257 258 259 260 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 256

# Refreshes planner statistics for the current primary table; run both
# after the bulk copy and after the swap.
def run_analyze!
  logger.info("Performing ANALYZE!")

  analyze_sql = "ANALYZE VERBOSE #{client.table};"
  Query.run(client.connection, analyze_sql)
end
.setup!(options) ⇒ Object
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 12

# Prepares the session for the run: builds the client, relaxes session
# timeouts, sets the search path and installs the helper SQL functions.
#
# NOTE: the rendered source had dropped tokens — the signature takes
# +options+ (see the method summary ".setup!(options)"), which must be
# passed through to Client.new.
#
# @param options [Object] parsed CLI options used to construct the Client
def setup!(options)
  client = Store.set(:client, Client.new(options))

  sql = <<~SQL
    SET statement_timeout = 0;
    SET client_min_messages = warning;
    SET search_path TO #{client.schema};
  SQL

  Query.run(client.connection, sql)
  # install functions
  Query.run(client.connection, FUNC_FIX_SERIAL_SEQUENCE)
  Query.run(client.connection, FUNC_CREATE_TABLE_ALL)

  setup_store
end
.setup_audit_table! ⇒ Object
97 98 99 100 101 102 103 104 105 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 97

def setup_audit_table!
  # Create the audit table: the parent's columns (via LIKE) plus a serial
  # primary key, an operation-type column and a trigger timestamp column
  # that the replay phase relies on.
  logger.info("Setting up audit table", { audit_table: audit_table })

  create_sql = <<~SQL
    CREATE TABLE #{audit_table} (#{audit_table_pk} SERIAL PRIMARY KEY, #{operation_type_column} text, #{trigger_time_column} timestamp, LIKE #{client.table});
  SQL

  Query.run(client.connection, create_sql)
end
.setup_shadow_table! ⇒ Object
147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 147

def setup_shadow_table! # re-uses transaction with serializable
  # Switch the open transaction to SERIALIZABLE: every query from here
  # until copy_data! commits runs inside it. This guarantees there is no
  # race between installing the trigger on the primary table, clearing
  # the audit table and copying rows into the shadow table — so rows
  # replayed from the audit table afterwards cannot be duplicates.
  isolation_sql = "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE"
  Query.run(client.connection, isolation_sql, true)

  logger.info("Setting up shadow table", { shadow_table: shadow_table })

  create_sql = "SELECT create_table_all('#{client.table}', '#{shadow_table}');"
  Query.run(client.connection, create_sql, true)

  # Re-point serial sequences at the shadow table so inserts keep working.
  fix_serials_sql = "SELECT fix_serial_sequence('#{client.table}', '#{shadow_table}');"
  Query.run(client.connection, fix_serials_sql, true)
end
.setup_signals! ⇒ Object
72 73 74 75 76 77 78 79 80 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 72

# Installs traps for TERM/QUIT/INT that forward the signal name through
# a pipe, so handle_signals! can react outside of trap context (where
# most operations are unsafe).
#
# @return [IO] the read end of the signal pipe
def setup_signals!
  reader, writer = IO.pipe

  ["TERM", "QUIT", "INT"].each { |name| trap(name) { writer.puts name } }

  reader
end
.setup_store ⇒ Object
29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 29 def setup_store # Set this early on to ensure their creation and cleanup (unexpected) # happens at all times. IOW, the calls from Store.get always return # the same value. Store.set(:old_primary_table, "pgosc_op_table_#{client.table}") Store.set(:audit_table, "pgosc_at_#{client.table}_#{pgosc_identifier}") Store.set(:operation_type_column, "operation_type_#{pgosc_identifier}") Store.set(:trigger_time_column, "trigger_time_#{pgosc_identifier}") Store.set(:audit_table_pk, "at_#{pgosc_identifier}_id") Store.set(:audit_table_pk_sequence, "#{audit_table}_#{audit_table_pk}_seq") Store.set(:shadow_table, "pgosc_st_#{client.table}_#{pgosc_identifier}") Store.set(:referential_foreign_key_statements, Query.referential_foreign_keys_to_refresh(client, client.table)) Store.set(:self_foreign_key_statements, Query.self_foreign_keys_to_refresh(client, client.table)) end |
.setup_trigger! ⇒ Object
107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 107 def setup_trigger! # acquire access exclusive lock to ensure audit triggers # are setup fine. This also calls kill_backends (if opted in via flag) # so any competing backends will be killed to setup the trigger opened = Query.open_lock_exclusive(client, client.table) raise AccessExclusiveLockNotAcquired unless opened logger.info("Setting up triggers") sql = <<~SQL DROP TRIGGER IF EXISTS primary_to_audit_table_trigger ON #{client.table}; CREATE OR REPLACE FUNCTION primary_to_audit_table_trigger() RETURNS TRIGGER AS $$ BEGIN IF ( TG_OP = 'INSERT') THEN INSERT INTO \"#{audit_table}\" select nextval(\'#{audit_table_pk_sequence}\'), 'INSERT', clock_timestamp(), NEW.* ; RETURN NEW; ELSIF ( TG_OP = 'UPDATE') THEN INSERT INTO \"#{audit_table}\" select nextval(\'#{audit_table_pk_sequence}\'), 'UPDATE', clock_timestamp(), NEW.* ; RETURN NEW; ELSIF ( TG_OP = 'DELETE') THEN INSERT INTO \"#{audit_table}\" select nextval(\'#{audit_table_pk_sequence}\'), 'DELETE', clock_timestamp(), OLD.* ; RETURN NEW; END IF; END; $$ LANGUAGE PLPGSQL SECURITY DEFINER; CREATE TRIGGER primary_to_audit_table_trigger AFTER INSERT OR UPDATE OR DELETE ON #{client.table} FOR EACH ROW EXECUTE PROCEDURE primary_to_audit_table_trigger(); SQL Query.run(client.connection, sql, opened) ensure Query.run(client.connection, "COMMIT;") end |
.swap! ⇒ Object
224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 224 def swap! logger.info("Performing swap!") storage_params_reset = primary_table_storage_parameters.empty? ? "" : "ALTER TABLE #{client.table} SET (#{primary_table_storage_parameters});" # From here on, all statements are carried out in a single # transaction with access exclusive lock opened = Query.open_lock_exclusive(client, client.table) raise AccessExclusiveLockNotAcquired unless opened Query.run(client.connection, "SET statement_timeout to '#{SWAP_STATEMENT_TIMEOUT}';", opened) rows = Replay.rows_to_play(opened) Replay.play!(rows, opened) sql = <<~SQL ALTER TABLE #{client.table} RENAME to #{old_primary_table}; ALTER TABLE #{shadow_table} RENAME to #{client.table}; #{referential_foreign_key_statements} #{self_foreign_key_statements} #{storage_params_reset} DROP TRIGGER IF EXISTS primary_to_audit_table_trigger ON #{client.table}; SQL Query.run(client.connection, sql, opened) ensure Query.run(client.connection, "COMMIT;") Query.run(client.connection, "SET statement_timeout = 0;") end |
.validate_constraints! ⇒ Object
262 263 264 265 266 267 268 |
# File 'lib/pg_online_schema_change/orchestrate.rb', line 262

# Runs VALIDATE CONSTRAINT for the foreign keys refreshed during the
# swap, turning them from NOT VALID into fully enforced constraints.
def validate_constraints!
  logger.info("Validating constraints!")

  statements = Query.get_foreign_keys_to_validate(client, client.table)

  Query.run(client.connection, statements)
end