Class: SwitchmanInstJobs::JobsMigrator

Inherits:
Object
  • Object
show all
Defined in:
lib/switchman_inst_jobs/jobs_migrator.rb

Class Method Summary collapse

Class Method Details

.add_before_move_callback(proc) ⇒ Object


7
8
9
10
# File 'lib/switchman_inst_jobs/jobs_migrator.rb', line 7

# Register a callback to be invoked before jobs are moved between shards.
# Callbacks accumulate across calls and are stored in registration order.
def add_before_move_callback(proc)
  (@before_move_callbacks ||= []) << proc
end

.clear_shard_cache(debug_message = nil) ⇒ Object


73
74
75
76
77
78
79
80
81
# File 'lib/switchman_inst_jobs/jobs_migrator.rb', line 73

# Flush the Switchman cache, then wait out the in-process shard cache
# refresh window so every worker observes the updated shard flags.
#
# debug_message - optional String appended to the log line for context.
def clear_shard_cache(debug_message = nil)
  ::Switchman.cache.clear
  Rails.logger.debug { "Waiting for caches to clear #{debug_message}" }
  # @skip_cache_wait is for spec usage only
  return if @skip_cache_wait

  # Wait a little over the 60 second in-process shard cache clearing
  # threshold to ensure that all new stranded jobs are now being
  # enqueued with next_in_strand: false
  sleep(65)
end

.ensure_unblock_stranded_for(shards) ⇒ Object

If migrate_strands ran on any shards that fell entirely into scenario 1 (no running job and no jobs to move), block_stranded was never flipped back to false for them, so flip it now.


60
61
62
63
64
65
66
67
68
69
70
71
# File 'lib/switchman_inst_jobs/jobs_migrator.rb', line 60

# For any of the given shards still marked block_stranded, flip the flag
# off and unblock strands on each affected delayed-jobs shard.
#
# shards - shard ids (or shard records) to check.
def ensure_unblock_stranded_for(shards)
  blocked_shards = ::Switchman::Shard.where(id: shards, block_stranded: true).to_a
  return if blocked_shards.empty?

  ::Switchman::Shard.where(id: blocked_shards).update_all(block_stranded: false)
  clear_shard_cache

  # blocked_shards was loaded before the update_all, so the records carry a
  # stale block_stranded value; we only use them to locate the jobs shards.
  blocked_shards.map(&:delayed_jobs_shard).uniq.each do |dj_shard|
    unblock_strands(dj_shard)
  end
end

.migrate_everythingObject


188
189
190
191
192
193
194
195
196
197
198
199
200
# File 'lib/switchman_inst_jobs/jobs_migrator.rb', line 188

# Move all unstranded, unlocked jobs from the current delayed-jobs shard to
# their target shards, batched per target.
def migrate_everything
  source_shard = ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
  unstranded_scope = ::Delayed::Job.shard(source_shard).where(strand: nil)

  build_shard_map(unstranded_scope, source_shard).each do |target_shard, source_shard_ids|
    batch_move_jobs(
      target_shard: target_shard,
      source_shard: source_shard,
      # locked_by: nil skips jobs a worker is currently running.
      scope: unstranded_scope.where(shard_id: source_shard_ids, locked_by: nil)
    )
  end
end

.migrate_shards(shard_map) ⇒ Object


26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
# File 'lib/switchman_inst_jobs/jobs_migrator.rb', line 26

# Migrate jobs for a set of shards to new delayed-jobs shards.
#
# shard_map - pairs of [shard (record or id), target delayed-jobs shard
#             (record or id)].
def migrate_shards(shard_map)
  source_shards = Set.new
  target_shards = Hash.new { |hash, key| hash[key] = [] }
  shard_map.each do |shard, target_shard|
    shard = ::Switchman::Shard.find(shard) unless shard.is_a?(::Switchman::Shard)
    source_shards << shard.delayed_jobs_shard.id
    target_shards[target_shard.try(:id) || target_shard] << shard.id
  end

  # Do the updates in batches and then just clear redis instead of clearing them one at a time
  target_shards.each do |target_shard_id, shard_ids|
    updates = { delayed_jobs_shard_id: target_shard_id, block_stranded: true }
    updates[:updated_at] = Time.zone.now if ::Switchman::Shard.column_names.include?('updated_at')
    ::Switchman::Shard.where(id: shard_ids).update_all(updates)
  end
  clear_shard_cache

  ::Switchman::Shard.clear_cache
  # rubocop:disable Style/CombinableLoops
  # We first migrate strands so that we can stop blocking strands before we migrate unstranded jobs
  source_shards.each do |source|
    ::Switchman::Shard.lookup(source).activate(::Delayed::Backend::ActiveRecord::AbstractJob) { migrate_strands }
  end

  source_shards.each do |source|
    ::Switchman::Shard.lookup(source).activate(::Delayed::Backend::ActiveRecord::AbstractJob) { migrate_everything }
  end
  ensure_unblock_stranded_for(shard_map.map(&:first))
  # rubocop:enable Style/CombinableLoops
end

.migrate_strandsObject


92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
# File 'lib/switchman_inst_jobs/jobs_migrator.rb', line 92

# Move stranded jobs from the current delayed-jobs shard to their target
# shards, preserving strand ordering even while a job in the strand may
# still be running on the source shard.
def migrate_strands
  # there are 4 scenarios to deal with here
  # 1) no running job, no jobs moved: do nothing
  # 2) running job, no jobs moved; create blocker with next_in_strand=false
  #    to prevent new jobs from immediately executing
  # 3) running job, jobs moved; set next_in_strand=false on the first of
  #    those (= do nothing since it should already be false)
  # 4) no running job, jobs moved: set next_in_strand=true on the first of
  #    those (= do nothing since it should already be true)

  source_shard = ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
  strand_scope = ::Delayed::Job.shard(source_shard).where.not(strand: nil)
  shard_map = build_shard_map(strand_scope, source_shard)
  shard_map.each do |(target_shard, source_shard_ids)|
    shard_scope = strand_scope.where(shard_id: source_shard_ids)

    # 1) is taken care of because it should not show up here in strands
    strands = shard_scope.distinct.order(:strand).pluck(:strand)

    target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
      strands.each do |strand|
        # Each strand is moved inside a transaction spanning both shards so
        # a partial move is never observable.
        transaction_on([source_shard, target_shard]) do
          this_strand_scope = shard_scope.where(strand: strand)
          # we want to copy all the jobs except the one that is still running.
          jobs_scope = this_strand_scope.where(locked_by: nil)

          # 2) and part of 3) are taken care of here by creating a blocker
          # job with next_in_strand = false. as soon as the current
          # running job is finished it should set next_in_strand
          # We lock it to ensure that the jobs worker can't delete it until we are done moving the strand
          # Since we only unlock it on the new jobs queue *after* deleting from the original
          # the lock ensures the blocker always gets unlocked
          first = this_strand_scope.where.not(locked_by: nil).next_in_strand_order.lock.first
          if first
            # Blocker is a no-op job (Kernel.sleep 0) created on the target
            # shard; it holds the strand closed until the running job ends.
            first_job = ::Delayed::Job.create!(strand: strand, next_in_strand: false)
            first_job.payload_object = ::Delayed::PerformableMethod.new(Kernel, :sleep, args: [0])
            first_job.queue = first.queue
            first_job.tag = 'Kernel.sleep'
            first_job.source = 'JobsMigrator::StrandBlocker'
            first_job.max_attempts = 1
            # If we ever have jobs left over from 9999 jobs moves of a single shard,
            # something has gone terribly wrong
            first_job.strand_order_override = -9999
            first_job.save!
            # the rest of 3) is taken care of here
            # make sure that all the jobs moved over are NOT next in strand
            ::Delayed::Job.where(next_in_strand: true, strand: strand, locked_by: nil).
              update_all(next_in_strand: false)
          end

          # 4) is taken care of here, by leaving next_in_strand alone and
          # it should execute on the new shard
          batch_move_jobs(
            target_shard: target_shard,
            source_shard: source_shard,
            scope: jobs_scope
          ) do |job, new_job|
            # This ensures jobs enqueued on the old jobs shard run before jobs on the new jobs queue
            new_job.strand_order_override = job.strand_order_override - 1
          end
        end
      end

      updated = ::Switchman::Shard.where(id: source_shard_ids, block_stranded: true).
        update_all(block_stranded: false)
      # If this is being manually re-run for some reason to clean something up, don't wait for nothing to happen
      clear_shard_cache("(#{source_shard.id} -> #{target_shard.id})") unless updated.zero?

      ::Switchman::Shard.clear_cache
      # At this time, let's unblock all the strands on the target shard that aren't being held by a blocker
      # but actually could have run and we just didn't know it because we didn't know if they had jobs
      # on the source shard
      unblock_strands(target_shard)
    end
  end
end

.runObject

This method expects that all relevant shards already have block_stranded: true but otherwise jobs can be running normally


85
86
87
88
89
90
# File 'lib/switchman_inst_jobs/jobs_migrator.rb', line 85

# Top-level entry point for a jobs migration on the current delayed-jobs
# shard: migrates stranded jobs first, then all remaining unstranded jobs.
# This method expects that all relevant shards already have
# block_stranded: true, but otherwise jobs can be running normally.
def run
  # Ensure this is never run with a dirty in-memory shard cache
  ::Switchman::Shard.clear_cache
  # Order matters: strands must move (and be unblocked) before the
  # unstranded jobs are migrated.
  migrate_strands
  migrate_everything
end

.transaction_on(shards, &block) ⇒ Object


12
13
14
15
16
17
18
19
20
21
22
23
24
# File 'lib/switchman_inst_jobs/jobs_migrator.rb', line 12

# Open a nested database transaction on every shard in +shards+, then yield
# the block inside the innermost transaction (with the original current
# shard re-activated so the block runs in the caller's shard context).
#
# shards - Array of Switchman shard records; yielded immediately if empty.
#
# Returns the block's return value.
#
# Fix: the original called `shards.pop`, destructively mutating the
# caller's array; we now destructure non-destructively instead.
def transaction_on(shards, &block)
  return yield if shards.empty?

  # Take the last shard (outermost transaction) without mutating the input.
  *remaining, shard = shards
  current_shard = ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
  shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
    ::Delayed::Job.transaction do
      current_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
        # Recurse to wrap the remaining shards in their own transactions.
        transaction_on(remaining, &block)
      end
    end
  end
end

.unblock_strands(target_shard) ⇒ Object


169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# File 'lib/switchman_inst_jobs/jobs_migrator.rb', line 169

# On the target shard, set next_in_strand: true for the first eligible job
# of each strand, in batches of up to 500 strands, looping until no rows
# are updated.
#
# A strand is eligible only when (a) its job's shard is not block_stranded
# and (b) no job in the strand is already next_in_strand or is a
# JobsMigrator::StrandBlocker — the NOT EXISTS subquery below enforces (b).
def unblock_strands(target_shard)
  target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
    loop do
      # We only want to unlock stranded jobs where they don't belong to a blocked shard (if they *do*
      # belong to a blocked shard, they must be part of a concurrent jobs migration from a different
      # source shard to this target shard, so we shouldn't unlock them yet). We only ever unlock one
      # job per strand here to keep the logic cleaner; if the job is n-stranded, after the first one
      # runs, the trigger will unlock larger batches
      # NOTE(review): DISTINCT ON is PostgreSQL-specific — this presumably
      # only targets Postgres; confirm before running elsewhere.
      break if ::Delayed::Job.where(id: ::Delayed::Job.select('DISTINCT ON (strand) id').
        where.not(strand: nil).
        where.not(shard_id: ::Switchman::Shard.where(block_stranded: true).pluck(:id)).where(
          ::Delayed::Job.select(1).from("#{::Delayed::Job.quoted_table_name} dj2").
          where("dj2.next_in_strand = true OR dj2.source = 'JobsMigrator::StrandBlocker'").
          where('dj2.strand = delayed_jobs.strand').arel.exists.not
        ).order(:strand, :strand_order_override, :id)).limit(500).update_all(next_in_strand: true).zero?
    end
  end
end