Class: Pallets::Backends::Redis
- Defined in:
- lib/pallets/backends/redis.rb
Constant Summary
- QUEUE_KEY = 'queue'
- RELIABILITY_QUEUE_KEY = 'reliability-queue'
- RELIABILITY_SET_KEY = 'reliability-set'
- RETRY_SET_KEY = 'retry-set'
- GIVEN_UP_SET_KEY = 'given-up-set'
- WORKFLOW_QUEUE_KEY = 'workflow-queue:%s'
- JOBMASKS_KEY = 'jobmasks:%s'
- JOBMASK_KEY = 'jobmask:%s'
- CONTEXT_KEY = 'context:%s'
- REMAINING_KEY = 'remaining:%s'
Instance Method Summary
- #discard(job) ⇒ Object
- #get_context(wfid) ⇒ Object
- #give_up(wfid, job, old_job) ⇒ Object
- #initialize(blocking_timeout:, failed_job_lifespan:, failed_job_max_count:, job_timeout:, pool_size:, **options) ⇒ Redis (constructor)
  A new instance of Redis.
- #pick ⇒ Object
- #reschedule_all(earlier_than) ⇒ Object
- #retry(job, old_job, at) ⇒ Object
- #run_workflow(wfid, jobs, jobmasks, context_buffer) ⇒ Object
- #save(wfid, jid, job, context_buffer) ⇒ Object
Constructor Details
#initialize(blocking_timeout:, failed_job_lifespan:, failed_job_max_count:, job_timeout:, pool_size:, **options) ⇒ Redis
Returns a new instance of Redis.
# File 'lib/pallets/backends/redis.rb', line 17

def initialize(blocking_timeout:, failed_job_lifespan:, failed_job_max_count:, job_timeout:, pool_size:, **options)
  @blocking_timeout = blocking_timeout
  @failed_job_lifespan = failed_job_lifespan
  @failed_job_max_count = failed_job_max_count
  @job_timeout = job_timeout
  @pool = Pallets::Pool.new(pool_size) { ::Redis.new(options) }

  register_scripts
end
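A minimal usage sketch (not from the generated docs), assuming a local Redis; the values below are illustrative, and the assumption is that any extra keyword arguments are forwarded to ::Redis.new when the pool builds its connections:

backend = Pallets::Backends::Redis.new(
  blocking_timeout: 5,                 # seconds BRPOPLPUSH blocks in #pick
  failed_job_lifespan: 7 * 24 * 3600,  # illustrative: keep given-up jobs for a week
  failed_job_max_count: 1_000,         # illustrative cap on the given-up set
  job_timeout: 1_800,                  # seconds before a picked job is considered lost
  pool_size: 10,                       # number of pooled Redis connections
  url: 'redis://localhost:6379/0'      # assumption: forwarded to ::Redis.new
)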
Instance Method Details
#discard(job) ⇒ Object
# File 'lib/pallets/backends/redis.rb', line 67

def discard(job)
  @pool.execute do |client|
    client.evalsha(
      @scripts['discard'],
      [GIVEN_UP_SET_KEY, RELIABILITY_QUEUE_KEY, RELIABILITY_SET_KEY],
      [Time.now.to_f, job, Time.now.to_f - @failed_job_lifespan, @failed_job_max_count]
    )
  end
end
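An illustrative call, not from the source; serialized_job is a hypothetical name for the raw payload string previously returned by #pick. Judging by the script arguments, entries older than failed_job_lifespan or beyond failed_job_max_count appear to be pruned from the given-up set as well:

# Move a payload that should no longer run off the reliability queue and
# into the given-up set, scored by the current time.
backend.discard(serialized_job)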
#get_context(wfid) ⇒ Object
# File 'lib/pallets/backends/redis.rb', line 41

def get_context(wfid)
  @pool.execute do |client|
    client.hgetall(CONTEXT_KEY % wfid)
  end
end
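An illustrative call (the workflow id is hypothetical). Since HGETALL returns the raw Redis hash, keys and values come back as strings:

context = backend.get_context('hypothetical-wfid')
context['user_id'] # => "42", assuming that entry was stored earlier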
#give_up(wfid, job, old_job) ⇒ Object
# File 'lib/pallets/backends/redis.rb', line 77

def give_up(wfid, job, old_job)
  @pool.execute do |client|
    client.evalsha(
      @scripts['give_up'],
      [GIVEN_UP_SET_KEY, RELIABILITY_QUEUE_KEY, RELIABILITY_SET_KEY,
       JOBMASKS_KEY % wfid, WORKFLOW_QUEUE_KEY % wfid, REMAINING_KEY % wfid, CONTEXT_KEY % wfid],
      [Time.now.to_f, job, old_job, Time.now.to_f - @failed_job_lifespan, @failed_job_max_count]
    )
  end
end
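An illustrative call, not from the source; failed_payload and original_payload are hypothetical names, and their roles below are assumptions based on the script's arguments:

# Assumption: failed_payload is the job annotated with failure details and
# original_payload is the exact payload sitting on the reliability queue.
# The script's keys suggest it records the failure in the given-up set and
# cleans up the workflow's queue, remaining counter, context and jobmasks.
backend.give_up(wfid, failed_payload, original_payload)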
#pick ⇒ Object
# File 'lib/pallets/backends/redis.rb', line 27

def pick
  @pool.execute do |client|
    job = client.brpoplpush(QUEUE_KEY, RELIABILITY_QUEUE_KEY, timeout: @blocking_timeout)
    if job
      # We store the job's timeout so we know when to retry jobs that are
      # still on the reliability queue. We do this separately since there is
      # no other way to atomically BRPOPLPUSH from the main queue to a
      # sorted set
      client.zadd(RELIABILITY_SET_KEY, Time.now.to_f + @job_timeout, job)
    end
    job
  end
end
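A hypothetical worker loop built on #pick (a sketch, not the actual Pallets worker):

loop do
  job = backend.pick # blocks for up to blocking_timeout seconds; nil on timeout
  next unless job
  # ... deserialize and run the task, then acknowledge it via #save,
  #     or hand it to #retry / #give_up on failure ...
end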
#reschedule_all(earlier_than) ⇒ Object
# File 'lib/pallets/backends/redis.rb', line 87

def reschedule_all(earlier_than)
  @pool.execute do |client|
    client.evalsha(
      @scripts['reschedule_all'],
      [RELIABILITY_SET_KEY, RELIABILITY_QUEUE_KEY, RETRY_SET_KEY, QUEUE_KEY],
      [earlier_than]
    )
  end
end
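An illustrative maintenance call (a sketch, not the actual scheduler): jobs whose reliability or retry deadline score is older than the given timestamp are moved back onto the main queue:

# Requeue everything whose deadline has already passed.
backend.reschedule_all(Time.now.to_f)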
#retry(job, old_job, at) ⇒ Object
# File 'lib/pallets/backends/redis.rb', line 57

def retry(job, old_job, at)
  @pool.execute do |client|
    client.evalsha(
      @scripts['retry'],
      [RETRY_SET_KEY, RELIABILITY_QUEUE_KEY, RELIABILITY_SET_KEY],
      [at, job, old_job]
    )
  end
end
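An illustrative call with hypothetical variable names: at is a Unix timestamp used as the retry set score, so the job becomes eligible again once #reschedule_all runs with a later timestamp:

# Assumption: new_payload carries the updated failure details, old_payload is
# the payload currently sitting on the reliability queue.
backend.retry(new_payload, old_payload, Time.now.to_f + 30)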
#run_workflow(wfid, jobs, jobmasks, context_buffer) ⇒ Object
# File 'lib/pallets/backends/redis.rb', line 97

def run_workflow(wfid, jobs, jobmasks, context_buffer)
  @pool.execute do |client|
    client.multi do
      jobmasks.each { |jid, jobmask| client.zadd(JOBMASK_KEY % jid, jobmask) }
      client.sadd(JOBMASKS_KEY % wfid, jobmasks.map { |jid, _| JOBMASK_KEY % jid }) unless jobmasks.empty?
      client.evalsha(
        @scripts['run_workflow'],
        [WORKFLOW_QUEUE_KEY % wfid, QUEUE_KEY, REMAINING_KEY % wfid],
        jobs
      )
      client.hmset(CONTEXT_KEY % wfid, *context_buffer.to_a) unless context_buffer.empty?
    end
  end
end
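A schematic call only; the exact shapes of jobs and jobmasks are produced by Pallets' workflow internals and are not documented here. The annotations below are inferred from the code above, not from the docs:

# wfid           - workflow instance id, interpolated into the per-workflow keys
# jobs           - forwarded verbatim as the 'run_workflow' script's arguments
# jobmasks       - { jid => [[score, member], ...] } pairs stored via ZADD
#                  (shape inferred from the ZADD call above)
# context_buffer - initial context entries written with HMSET when non-empty
backend.run_workflow(wfid, jobs, jobmasks, context_buffer)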
#save(wfid, jid, job, context_buffer) ⇒ Object
# File 'lib/pallets/backends/redis.rb', line 47

def save(wfid, jid, job, context_buffer)
  @pool.execute do |client|
    client.evalsha(
      @scripts['save'],
      [WORKFLOW_QUEUE_KEY % wfid, QUEUE_KEY, RELIABILITY_QUEUE_KEY, RELIABILITY_SET_KEY,
       CONTEXT_KEY % wfid, REMAINING_KEY % wfid, JOBMASK_KEY % jid, JOBMASKS_KEY % wfid],
      context_buffer.to_a << job
    )
  end
end
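An illustrative call with hypothetical variable names; as the code above shows, the context buffer is flattened into key/value pairs and the original payload is appended as the final script argument:

# Assumption: job is the payload previously returned by #pick, and new_entries
# holds any key/value pairs the task added to the shared context (may be empty).
backend.save(wfid, jid, job, new_entries)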