Class: AWS::Flow::FlowConstants

Inherits:
Object
  • Object
show all
Defined in:
lib/aws/decider/flow_defaults.rb

Overview

Constants used by the AWS Flow Framework for Ruby.

Constant Summary collapse

DATA_LIMIT =
32768
REASON_LIMIT =

Number of chars that can fit in FlowException’s reason

256
DETAILS_LIMIT =

Number of chars that can fit in FlowException’s details. Same as DATA_LIMIT

DATA_LIMIT
TRUNCATION_OVERHEAD =

This is the truncation overhead for serialization.

8000
TRUNCATED =

Truncation string added to the end of a truncated string

"[TRUNCATED]"
INFINITY =
-1
RETENTION_DEFAULT =
7
NUM_OF_WORKERS_DEFAULT =
1

Class Attribute Summary collapse

Instance Attribute Summary collapse

Class Method Summary collapse

Class Attribute Details

.default_data_converterObject (readonly)

Returns the value of attribute default_data_converter.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def default_data_converter
  @default_data_converter
end

.exponential_retry_backoff_coefficientObject (readonly)

Returns the value of attribute exponential_retry_backoff_coefficient.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def exponential_retry_backoff_coefficient
  @exponential_retry_backoff_coefficient
end

.exponential_retry_exceptions_to_excludeObject (readonly)

Returns the value of attribute exponential_retry_exceptions_to_exclude.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def exponential_retry_exceptions_to_exclude
  @exponential_retry_exceptions_to_exclude
end

.exponential_retry_exceptions_to_includeObject (readonly)

Returns the value of attribute exponential_retry_exceptions_to_include.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def exponential_retry_exceptions_to_include
  @exponential_retry_exceptions_to_include
end

.exponential_retry_functionObject (readonly)

Returns the value of attribute exponential_retry_function.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def exponential_retry_function
  @exponential_retry_function
end

.exponential_retry_initial_retry_intervalObject (readonly)

Returns the value of attribute exponential_retry_initial_retry_interval.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def exponential_retry_initial_retry_interval
  @exponential_retry_initial_retry_interval
end

.exponential_retry_maximum_attemptsObject (readonly)

Returns the value of attribute exponential_retry_maximum_attempts.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def exponential_retry_maximum_attempts
  @exponential_retry_maximum_attempts
end

.exponential_retry_maximum_retry_interval_secondsObject (readonly)

Returns the value of attribute exponential_retry_maximum_retry_interval_seconds.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def exponential_retry_maximum_retry_interval_seconds
  @exponential_retry_maximum_retry_interval_seconds
end

.exponential_retry_retry_expiration_secondsObject (readonly)

Returns the value of attribute exponential_retry_retry_expiration_seconds.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def exponential_retry_retry_expiration_seconds
  @exponential_retry_retry_expiration_seconds
end

.jitter_functionObject (readonly)

Returns the value of attribute jitter_function.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def jitter_function
  @jitter_function
end

.should_jitterObject (readonly)

Returns the value of attribute should_jitter.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def should_jitter
  @should_jitter
end

.use_worker_task_listObject (readonly)

Returns the value of attribute use_worker_task_list.



96
97
98
# File 'lib/aws/decider/flow_defaults.rb', line 96

def use_worker_task_list
  @use_worker_task_list
end

Instance Attribute Details

#default_data_converterObject (readonly)

The DataConverter used to interpret results from Amazon SWF.

Returns:



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and default settings used by the AWS Flow Framework for Ruby.
# Retry and jitter defaults live in class instance variables and are
# exposed read-only through the attr_readers in the `class << self` block.
class FlowConstants

  class << self
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT
  DETAILS_LIMIT = DATA_LIMIT
  # This is the truncation overhead for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Truncation string appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"; compared against the maximum-interval and
  # expiration values inside the exponential retry function below.
  INFINITY = -1
  # Default retention period (presumably SWF domain retention, in days) --
  # TODO confirm against the caller that registers the domain.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Returns a new Hash of default registration/start options for the
  # "FlowDefault" workflow and its result activity.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Default retry-policy values, read via the attr_readers defined above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]
  # Default exponential backoff function. Arguments: `first` (Time the task
  # was first scheduled), `time_of_failure` (Time or nil), `attempts` (Hash
  # whose values are per-category attempt counts), `options`
  # (ExponentialRetryOptions). Returns the retry delay in whole seconds, or
  # -1 when the retry would run past the expiration interval.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError.new("first should be an instance of Time") unless first.instance_of?(Time)
    raise ArgumentError.new("time_of_failure should be nil or an instance of Time") unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError.new("number of attempts should be positive") if (attempts.values.find {|x| x < 0})
    raise ArgumentError.new("number of attempts should be more than 2") if (attempts.values.reduce(0,:+) < 2)
    raise ArgumentError.new("user options must be of type ExponentialRetryOptions") unless options.is_a? ExponentialRetryOptions

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # calculate the initial retry seconds
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # check if the calculated retry seconds is greater than the maximum
    # retry interval allowed. If it is, then replace it with maximum_retry_interval_seconds
    result = max_interval if ( !max_interval.nil? && max_interval != INFINITY && result > max_interval)

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if retry will expire
    result = -1 if (! retry_expiration.nil? &&
                    retry_expiration != INFINITY &&
                    (result + time_elapsed) >= retry_expiration)

    return result.to_i
  end

  # Default jitter function: yields a pseudo-random integer in
  # [0, max_value). Seeding Random with `seed.to_i` makes the result
  # deterministic for a given seed (replays produce the same value).
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError.new("max_value should be greater than 0") unless max_value > 0
    random = Random.new(seed.to_i)
    random.rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string indicating that the worker's own task list should be
  # used rather than an explicitly configured one.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#exponential_retry_backoff_coefficientFloat (readonly)

The coefficient used to determine how much to back off the interval

timing for an exponential retry scenario.

Returns:

  • (Float)

`2.0` (each retry takes twice as long as the previous attempt)



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and default settings used by the AWS Flow Framework for Ruby.
# Retry and jitter defaults live in class instance variables and are
# exposed read-only through the attr_readers in the `class << self` block.
class FlowConstants

  class << self
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT
  DETAILS_LIMIT = DATA_LIMIT
  # This is the truncation overhead for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Truncation string appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"; compared against the maximum-interval and
  # expiration values inside the exponential retry function below.
  INFINITY = -1
  # Default retention period (presumably SWF domain retention, in days) --
  # TODO confirm against the caller that registers the domain.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Returns a new Hash of default registration/start options for the
  # "FlowDefault" workflow and its result activity.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Default retry-policy values, read via the attr_readers defined above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]
  # Default exponential backoff function. Arguments: `first` (Time the task
  # was first scheduled), `time_of_failure` (Time or nil), `attempts` (Hash
  # whose values are per-category attempt counts), `options`
  # (ExponentialRetryOptions). Returns the retry delay in whole seconds, or
  # -1 when the retry would run past the expiration interval.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError.new("first should be an instance of Time") unless first.instance_of?(Time)
    raise ArgumentError.new("time_of_failure should be nil or an instance of Time") unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError.new("number of attempts should be positive") if (attempts.values.find {|x| x < 0})
    raise ArgumentError.new("number of attempts should be more than 2") if (attempts.values.reduce(0,:+) < 2)
    raise ArgumentError.new("user options must be of type ExponentialRetryOptions") unless options.is_a? ExponentialRetryOptions

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # calculate the initial retry seconds
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # check if the calculated retry seconds is greater than the maximum
    # retry interval allowed. If it is, then replace it with maximum_retry_interval_seconds
    result = max_interval if ( !max_interval.nil? && max_interval != INFINITY && result > max_interval)

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if retry will expire
    result = -1 if (! retry_expiration.nil? &&
                    retry_expiration != INFINITY &&
                    (result + time_elapsed) >= retry_expiration)

    return result.to_i
  end

  # Default jitter function: yields a pseudo-random integer in
  # [0, max_value). Seeding Random with `seed.to_i` makes the result
  # deterministic for a given seed (replays produce the same value).
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError.new("max_value should be greater than 0") unless max_value > 0
    random = Random.new(seed.to_i)
    random.rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string indicating that the worker's own task list should be
  # used rather than an explicitly configured one.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#exponential_retry_exceptions_to_excludeArray<Object> (readonly)

A list of the exception types to exclude from initiating retry attempts.

Returns:

  • (Array<Object>)

    an empty list. No exceptions are excluded.



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and default settings used by the AWS Flow Framework for Ruby.
# Retry and jitter defaults live in class instance variables and are
# exposed read-only through the attr_readers in the `class << self` block.
class FlowConstants

  class << self
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT
  DETAILS_LIMIT = DATA_LIMIT
  # This is the truncation overhead for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Truncation string appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"; compared against the maximum-interval and
  # expiration values inside the exponential retry function below.
  INFINITY = -1
  # Default retention period (presumably SWF domain retention, in days) --
  # TODO confirm against the caller that registers the domain.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Returns a new Hash of default registration/start options for the
  # "FlowDefault" workflow and its result activity.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Default retry-policy values, read via the attr_readers defined above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]
  # Default exponential backoff function. Arguments: `first` (Time the task
  # was first scheduled), `time_of_failure` (Time or nil), `attempts` (Hash
  # whose values are per-category attempt counts), `options`
  # (ExponentialRetryOptions). Returns the retry delay in whole seconds, or
  # -1 when the retry would run past the expiration interval.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError.new("first should be an instance of Time") unless first.instance_of?(Time)
    raise ArgumentError.new("time_of_failure should be nil or an instance of Time") unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError.new("number of attempts should be positive") if (attempts.values.find {|x| x < 0})
    raise ArgumentError.new("number of attempts should be more than 2") if (attempts.values.reduce(0,:+) < 2)
    raise ArgumentError.new("user options must be of type ExponentialRetryOptions") unless options.is_a? ExponentialRetryOptions

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # calculate the initial retry seconds
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # check if the calculated retry seconds is greater than the maximum
    # retry interval allowed. If it is, then replace it with maximum_retry_interval_seconds
    result = max_interval if ( !max_interval.nil? && max_interval != INFINITY && result > max_interval)

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if retry will expire
    result = -1 if (! retry_expiration.nil? &&
                    retry_expiration != INFINITY &&
                    (result + time_elapsed) >= retry_expiration)

    return result.to_i
  end

  # Default jitter function: yields a pseudo-random integer in
  # [0, max_value). Seeding Random with `seed.to_i` makes the result
  # deterministic for a given seed (replays produce the same value).
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError.new("max_value should be greater than 0") unless max_value > 0
    random = Random.new(seed.to_i)
    random.rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string indicating that the worker's own task list should be
  # used rather than an explicitly configured one.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#exponential_retry_exceptions_to_includeArray<Class> (readonly)

A list of the exception types to include for initiating retry attempts.

Returns:

  • (Array<Class>)

`Exception` (all exceptions are included)



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and default settings used by the AWS Flow Framework for Ruby.
# Retry and jitter defaults live in class instance variables and are
# exposed read-only through the attr_readers in the `class << self` block.
class FlowConstants

  class << self
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT
  DETAILS_LIMIT = DATA_LIMIT
  # This is the truncation overhead for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Truncation string appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"; compared against the maximum-interval and
  # expiration values inside the exponential retry function below.
  INFINITY = -1
  # Default retention period (presumably SWF domain retention, in days) --
  # TODO confirm against the caller that registers the domain.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Returns a new Hash of default registration/start options for the
  # "FlowDefault" workflow and its result activity.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Default retry-policy values, read via the attr_readers defined above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]
  # Default exponential backoff function. Arguments: `first` (Time the task
  # was first scheduled), `time_of_failure` (Time or nil), `attempts` (Hash
  # whose values are per-category attempt counts), `options`
  # (ExponentialRetryOptions). Returns the retry delay in whole seconds, or
  # -1 when the retry would run past the expiration interval.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError.new("first should be an instance of Time") unless first.instance_of?(Time)
    raise ArgumentError.new("time_of_failure should be nil or an instance of Time") unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError.new("number of attempts should be positive") if (attempts.values.find {|x| x < 0})
    raise ArgumentError.new("number of attempts should be more than 2") if (attempts.values.reduce(0,:+) < 2)
    raise ArgumentError.new("user options must be of type ExponentialRetryOptions") unless options.is_a? ExponentialRetryOptions

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # calculate the initial retry seconds
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # check if the calculated retry seconds is greater than the maximum
    # retry interval allowed. If it is, then replace it with maximum_retry_interval_seconds
    result = max_interval if ( !max_interval.nil? && max_interval != INFINITY && result > max_interval)

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if retry will expire
    result = -1 if (! retry_expiration.nil? &&
                    retry_expiration != INFINITY &&
                    (result + time_elapsed) >= retry_expiration)

    return result.to_i
  end

  # Default jitter function: yields a pseudo-random integer in
  # [0, max_value). Seeding Random with `seed.to_i` makes the result
  # deterministic for a given seed (replays produce the same value).
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError.new("max_value should be greater than 0") unless max_value > 0
    random = Random.new(seed.to_i)
    random.rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string indicating that the worker's own task list should be
  # used rather than an explicitly configured one.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#exponential_retry_functionObject (readonly)

The default exponential retry function.

Returns:

  • A lambda that takes four parameters: an initial time, a time of failure, a number of attempts, and a set of ExponentialRetryOptions.



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and default settings used by the AWS Flow Framework for Ruby.
# Retry and jitter defaults live in class instance variables and are
# exposed read-only through the attr_readers in the `class << self` block.
class FlowConstants

  class << self
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT
  DETAILS_LIMIT = DATA_LIMIT
  # This is the truncation overhead for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Truncation string appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"; compared against the maximum-interval and
  # expiration values inside the exponential retry function below.
  INFINITY = -1
  # Default retention period (presumably SWF domain retention, in days) --
  # TODO confirm against the caller that registers the domain.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Returns a new Hash of default registration/start options for the
  # "FlowDefault" workflow and its result activity.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Default retry-policy values, read via the attr_readers defined above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]
  # Default exponential backoff function. Arguments: `first` (Time the task
  # was first scheduled), `time_of_failure` (Time or nil), `attempts` (Hash
  # whose values are per-category attempt counts), `options`
  # (ExponentialRetryOptions). Returns the retry delay in whole seconds, or
  # -1 when the retry would run past the expiration interval.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError.new("first should be an instance of Time") unless first.instance_of?(Time)
    raise ArgumentError.new("time_of_failure should be nil or an instance of Time") unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError.new("number of attempts should be positive") if (attempts.values.find {|x| x < 0})
    raise ArgumentError.new("number of attempts should be more than 2") if (attempts.values.reduce(0,:+) < 2)
    raise ArgumentError.new("user options must be of type ExponentialRetryOptions") unless options.is_a? ExponentialRetryOptions

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # calculate the initial retry seconds
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # check if the calculated retry seconds is greater than the maximum
    # retry interval allowed. If it is, then replace it with maximum_retry_interval_seconds
    result = max_interval if ( !max_interval.nil? && max_interval != INFINITY && result > max_interval)

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if retry will expire
    result = -1 if (! retry_expiration.nil? &&
                    retry_expiration != INFINITY &&
                    (result + time_elapsed) >= retry_expiration)

    return result.to_i
  end

  # Default jitter function: yields a pseudo-random integer in
  # [0, max_value). Seeding Random with `seed.to_i` makes the result
  # deterministic for a given seed (replays produce the same value).
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError.new("max_value should be greater than 0") unless max_value > 0
    random = Random.new(seed.to_i)
    random.rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string indicating that the worker's own task list should be
  # used rather than an explicitly configured one.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#exponential_retry_initial_retry_intervalFixnum (readonly)

The initial retry interval.

Returns:

  • (Fixnum)

`2`



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and default settings used by the AWS Flow Framework for Ruby.
# Retry and jitter defaults live in class instance variables and are
# exposed read-only through the attr_readers in the `class << self` block.
class FlowConstants

  class << self
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT
  DETAILS_LIMIT = DATA_LIMIT
  # This is the truncation overhead for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Truncation string appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"; compared against the maximum-interval and
  # expiration values inside the exponential retry function below.
  INFINITY = -1
  # Default retention period (presumably SWF domain retention, in days) --
  # TODO confirm against the caller that registers the domain.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Returns a new Hash of default registration/start options for the
  # "FlowDefault" workflow and its result activity.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Default retry-policy values, read via the attr_readers defined above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]
  # Default exponential backoff function. Arguments: `first` (Time the task
  # was first scheduled), `time_of_failure` (Time or nil), `attempts` (Hash
  # whose values are per-category attempt counts), `options`
  # (ExponentialRetryOptions). Returns the retry delay in whole seconds, or
  # -1 when the retry would run past the expiration interval.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError.new("first should be an instance of Time") unless first.instance_of?(Time)
    raise ArgumentError.new("time_of_failure should be nil or an instance of Time") unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError.new("number of attempts should be positive") if (attempts.values.find {|x| x < 0})
    raise ArgumentError.new("number of attempts should be more than 2") if (attempts.values.reduce(0,:+) < 2)
    raise ArgumentError.new("user options must be of type ExponentialRetryOptions") unless options.is_a? ExponentialRetryOptions

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # calculate the initial retry seconds
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # check if the calculated retry seconds is greater than the maximum
    # retry interval allowed. If it is, then replace it with maximum_retry_interval_seconds
    result = max_interval if ( !max_interval.nil? && max_interval != INFINITY && result > max_interval)

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if retry will expire
    result = -1 if (! retry_expiration.nil? &&
                    retry_expiration != INFINITY &&
                    (result + time_elapsed) >= retry_expiration)

    return result.to_i
  end

  # Default jitter function: yields a pseudo-random integer in
  # [0, max_value). Seeding Random with `seed.to_i` makes the result
  # deterministic for a given seed (replays produce the same value).
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError.new("max_value should be greater than 0") unless max_value > 0
    random = Random.new(seed.to_i)
    random.rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string indicating that the worker's own task list should be
  # used rather than an explicitly configured one.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#exponential_retry_maximum_attempts ⇒ Float (readonly)

The maximum number of attempts to make for an exponential retry of a

failed task.

Returns:

  • (Float)

    `Float::INFINITY` (there is no limit to the number of retry attempts)



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and framework-wide defaults used by the AWS Flow Framework for Ruby.
class FlowConstants

  class << self
    # Read-only access to the defaults assigned below as class instance variables.
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason.
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT.
  DETAILS_LIMIT = DATA_LIMIT
  # Truncation overhead allowance for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Marker appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"/"no expiration" for interval settings.
  INFINITY = -1
  # Default workflow-execution retention period, in days.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Default options used by the workflow starter when none are supplied.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Framework-wide retry defaults, exposed through the attr_readers above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]

  # Computes the delay, in seconds, before the next retry of a failed task
  # using exponential backoff, capped by the options' maximum interval.
  # Returns -1 when the retry would fire after the expiration interval.
  #
  # first           - Time the task was first scheduled.
  # time_of_failure - Time of the current failure, or nil.
  # attempts        - Hash whose values are per-exception attempt counts.
  # options         - ExponentialRetryOptions describing the retry policy.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError, "first should be an instance of Time" unless first.instance_of?(Time)
    raise ArgumentError, "time_of_failure should be nil or an instance of Time" unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError, "number of attempts should be non-negative" if attempts.values.any? { |x| x < 0 }
    raise ArgumentError, "number of attempts should be at least 2" if attempts.values.reduce(0, :+) < 2
    raise ArgumentError, "user options must be of type ExponentialRetryOptions" unless options.is_a?(ExponentialRetryOptions)

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # backoff delay: initial_interval * backoff^(total_attempts - 2)
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # cap the delay at maximum_retry_interval_seconds when one is set
    result = max_interval if !max_interval.nil? && max_interval != INFINITY && result > max_interval

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if the retry would land past the expiration interval
    result = -1 if !retry_expiration.nil? &&
                   retry_expiration != INFINITY &&
                   (result + time_elapsed) >= retry_expiration

    result.to_i
  end

  # Deterministic jitter: seeds a PRNG from +seed+ (replay-safe) and draws an
  # integer in [0, max_value). max_value must be greater than zero.
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError, "max_value should be greater than 0" unless max_value > 0
    Random.new(seed.to_i).rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when the AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string for activity/workflow options: use the same task list
  # the associated worker is polling on.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#exponential_retry_maximum_retry_interval_seconds ⇒ Fixnum (readonly)

The maximum interval that can pass, in seconds, before a retry occurs.

Returns:

  • (Fixnum)

    `-1` (no maximum)



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and framework-wide defaults used by the AWS Flow Framework for Ruby.
class FlowConstants

  class << self
    # Read-only access to the defaults assigned below as class instance variables.
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason.
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT.
  DETAILS_LIMIT = DATA_LIMIT
  # Truncation overhead allowance for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Marker appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"/"no expiration" for interval settings.
  INFINITY = -1
  # Default workflow-execution retention period, in days.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Default options used by the workflow starter when none are supplied.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Framework-wide retry defaults, exposed through the attr_readers above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]

  # Computes the delay, in seconds, before the next retry of a failed task
  # using exponential backoff, capped by the options' maximum interval.
  # Returns -1 when the retry would fire after the expiration interval.
  #
  # first           - Time the task was first scheduled.
  # time_of_failure - Time of the current failure, or nil.
  # attempts        - Hash whose values are per-exception attempt counts.
  # options         - ExponentialRetryOptions describing the retry policy.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError, "first should be an instance of Time" unless first.instance_of?(Time)
    raise ArgumentError, "time_of_failure should be nil or an instance of Time" unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError, "number of attempts should be non-negative" if attempts.values.any? { |x| x < 0 }
    raise ArgumentError, "number of attempts should be at least 2" if attempts.values.reduce(0, :+) < 2
    raise ArgumentError, "user options must be of type ExponentialRetryOptions" unless options.is_a?(ExponentialRetryOptions)

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # backoff delay: initial_interval * backoff^(total_attempts - 2)
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # cap the delay at maximum_retry_interval_seconds when one is set
    result = max_interval if !max_interval.nil? && max_interval != INFINITY && result > max_interval

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if the retry would land past the expiration interval
    result = -1 if !retry_expiration.nil? &&
                   retry_expiration != INFINITY &&
                   (result + time_elapsed) >= retry_expiration

    result.to_i
  end

  # Deterministic jitter: seeds a PRNG from +seed+ (replay-safe) and draws an
  # integer in [0, max_value). max_value must be greater than zero.
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError, "max_value should be greater than 0" unless max_value > 0
    Random.new(seed.to_i).rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when the AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string for activity/workflow options: use the same task list
  # the associated worker is polling on.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#exponential_retry_retry_expiration_seconds ⇒ Fixnum (readonly)

The maximum time that can pass, in seconds, before an exponential retry

attempt is considered to be a failure.

Returns:

  • (Fixnum)

    `-1` (no expiration for a retry attempt)



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and framework-wide defaults used by the AWS Flow Framework for Ruby.
class FlowConstants

  class << self
    # Read-only access to the defaults assigned below as class instance variables.
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason.
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT.
  DETAILS_LIMIT = DATA_LIMIT
  # Truncation overhead allowance for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Marker appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"/"no expiration" for interval settings.
  INFINITY = -1
  # Default workflow-execution retention period, in days.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Default options used by the workflow starter when none are supplied.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Framework-wide retry defaults, exposed through the attr_readers above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]

  # Computes the delay, in seconds, before the next retry of a failed task
  # using exponential backoff, capped by the options' maximum interval.
  # Returns -1 when the retry would fire after the expiration interval.
  #
  # first           - Time the task was first scheduled.
  # time_of_failure - Time of the current failure, or nil.
  # attempts        - Hash whose values are per-exception attempt counts.
  # options         - ExponentialRetryOptions describing the retry policy.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError, "first should be an instance of Time" unless first.instance_of?(Time)
    raise ArgumentError, "time_of_failure should be nil or an instance of Time" unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError, "number of attempts should be non-negative" if attempts.values.any? { |x| x < 0 }
    raise ArgumentError, "number of attempts should be at least 2" if attempts.values.reduce(0, :+) < 2
    raise ArgumentError, "user options must be of type ExponentialRetryOptions" unless options.is_a?(ExponentialRetryOptions)

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # backoff delay: initial_interval * backoff^(total_attempts - 2)
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # cap the delay at maximum_retry_interval_seconds when one is set
    result = max_interval if !max_interval.nil? && max_interval != INFINITY && result > max_interval

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if the retry would land past the expiration interval
    result = -1 if !retry_expiration.nil? &&
                   retry_expiration != INFINITY &&
                   (result + time_elapsed) >= retry_expiration

    result.to_i
  end

  # Deterministic jitter: seeds a PRNG from +seed+ (replay-safe) and draws an
  # integer in [0, max_value). max_value must be greater than zero.
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError, "max_value should be greater than 0" unless max_value > 0
    Random.new(seed.to_i).rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when the AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string for activity/workflow options: use the same task list
  # the associated worker is polling on.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#jitter_function ⇒ Object (readonly)

The function that is used to determine how long to wait for the next

retry attempt when *should_jitter* is set to `true`.

Returns:

  • a lambda that takes a random number seed and a maximum value (must be > 0).



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and framework-wide defaults used by the AWS Flow Framework for Ruby.
class FlowConstants

  class << self
    # Read-only access to the defaults assigned below as class instance variables.
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason.
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT.
  DETAILS_LIMIT = DATA_LIMIT
  # Truncation overhead allowance for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Marker appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"/"no expiration" for interval settings.
  INFINITY = -1
  # Default workflow-execution retention period, in days.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Default options used by the workflow starter when none are supplied.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Framework-wide retry defaults, exposed through the attr_readers above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]

  # Computes the delay, in seconds, before the next retry of a failed task
  # using exponential backoff, capped by the options' maximum interval.
  # Returns -1 when the retry would fire after the expiration interval.
  #
  # first           - Time the task was first scheduled.
  # time_of_failure - Time of the current failure, or nil.
  # attempts        - Hash whose values are per-exception attempt counts.
  # options         - ExponentialRetryOptions describing the retry policy.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError, "first should be an instance of Time" unless first.instance_of?(Time)
    raise ArgumentError, "time_of_failure should be nil or an instance of Time" unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError, "number of attempts should be non-negative" if attempts.values.any? { |x| x < 0 }
    raise ArgumentError, "number of attempts should be at least 2" if attempts.values.reduce(0, :+) < 2
    raise ArgumentError, "user options must be of type ExponentialRetryOptions" unless options.is_a?(ExponentialRetryOptions)

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # backoff delay: initial_interval * backoff^(total_attempts - 2)
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # cap the delay at maximum_retry_interval_seconds when one is set
    result = max_interval if !max_interval.nil? && max_interval != INFINITY && result > max_interval

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if the retry would land past the expiration interval
    result = -1 if !retry_expiration.nil? &&
                   retry_expiration != INFINITY &&
                   (result + time_elapsed) >= retry_expiration

    result.to_i
  end

  # Deterministic jitter: seeds a PRNG from +seed+ (replay-safe) and draws an
  # integer in [0, max_value). max_value must be greater than zero.
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError, "max_value should be greater than 0" unless max_value > 0
    Random.new(seed.to_i).rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when the AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string for activity/workflow options: use the same task list
  # the associated worker is polling on.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#should_jitter ⇒ Boolean (readonly)

Indicates whether there should be any randomness built in to the timing

of the retry attempt.

Returns:

  • (Boolean)

    `true`



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and framework-wide defaults used by the AWS Flow Framework for Ruby.
class FlowConstants

  class << self
    # Read-only access to the defaults assigned below as class instance variables.
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason.
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT.
  DETAILS_LIMIT = DATA_LIMIT
  # Truncation overhead allowance for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Marker appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"/"no expiration" for interval settings.
  INFINITY = -1
  # Default workflow-execution retention period, in days.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Default options used by the workflow starter when none are supplied.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Framework-wide retry defaults, exposed through the attr_readers above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]

  # Computes the delay, in seconds, before the next retry of a failed task
  # using exponential backoff, capped by the options' maximum interval.
  # Returns -1 when the retry would fire after the expiration interval.
  #
  # first           - Time the task was first scheduled.
  # time_of_failure - Time of the current failure, or nil.
  # attempts        - Hash whose values are per-exception attempt counts.
  # options         - ExponentialRetryOptions describing the retry policy.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError, "first should be an instance of Time" unless first.instance_of?(Time)
    raise ArgumentError, "time_of_failure should be nil or an instance of Time" unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError, "number of attempts should be non-negative" if attempts.values.any? { |x| x < 0 }
    raise ArgumentError, "number of attempts should be at least 2" if attempts.values.reduce(0, :+) < 2
    raise ArgumentError, "user options must be of type ExponentialRetryOptions" unless options.is_a?(ExponentialRetryOptions)

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # backoff delay: initial_interval * backoff^(total_attempts - 2)
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # cap the delay at maximum_retry_interval_seconds when one is set
    result = max_interval if !max_interval.nil? && max_interval != INFINITY && result > max_interval

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if the retry would land past the expiration interval
    result = -1 if !retry_expiration.nil? &&
                   retry_expiration != INFINITY &&
                   (result + time_elapsed) >= retry_expiration

    result.to_i
  end

  # Deterministic jitter: seeds a PRNG from +seed+ (replay-safe) and draws an
  # integer in [0, max_value). max_value must be greater than zero.
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError, "max_value should be greater than 0" unless max_value > 0
    Random.new(seed.to_i).rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when the AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string for activity/workflow options: use the same task list
  # the associated worker is polling on.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

#use_worker_task_list ⇒ String (readonly)

Used with activity and workflow options. Indicates that the activity

and/or workflow should use the same task list that the associated
worker is polling on.

Returns:

  • (String)

    "USE_WORKER_TASK_LIST"



93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
# File 'lib/aws/decider/flow_defaults.rb', line 93

# Constants and framework-wide defaults used by the AWS Flow Framework for Ruby.
class FlowConstants

  class << self
    # Read-only access to the defaults assigned below as class instance variables.
    attr_reader :exponential_retry_maximum_retry_interval_seconds, :exponential_retry_retry_expiration_seconds, :exponential_retry_backoff_coefficient, :exponential_retry_maximum_attempts, :exponential_retry_function, :default_data_converter, :exponential_retry_exceptions_to_include, :exponential_retry_exceptions_to_exclude, :jitter_function, :should_jitter, :exponential_retry_initial_retry_interval, :use_worker_task_list
  end

  # Sizes taken from
  # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_FailWorkflowExecutionDecisionAttributes.html
  DATA_LIMIT = 32768

  # Number of chars that can fit in FlowException's reason.
  REASON_LIMIT = 256
  # Number of chars that can fit in FlowException's details. Same as
  # DATA_LIMIT.
  DETAILS_LIMIT = DATA_LIMIT
  # Truncation overhead allowance for serialization.
  TRUNCATION_OVERHEAD = 8000
  # Marker appended to the end of a truncated string.
  TRUNCATED = "[TRUNCATED]"

  # Sentinel meaning "no limit"/"no expiration" for interval settings.
  INFINITY = -1
  # Default workflow-execution retention period, in days.
  RETENTION_DEFAULT = 7
  # Default number of workers.
  NUM_OF_WORKERS_DEFAULT = 1

  # Default options used by the workflow starter when none are supplied.
  def self.defaults
    {
      domain: "FlowDefault",
      prefix_name: "FlowDefaultWorkflowRuby",
      execution_method: "start",
      version: "1.0",
      # execution timeout (1 hour)
      execution_start_to_close_timeout: "3600",
      data_converter: self.data_converter,
      schedule_to_start_timeout: 60,
      start_to_close_timeout: 60,
      retry_policy: { maximum_attempts: 3 },
      task_list: "flow_default_ruby",
      result_activity_prefix: "FlowDefaultResultActivityRuby",
      result_activity_version: "1.0",
      result_activity_method: "run"
    }
  end

  # Framework-wide retry defaults, exposed through the attr_readers above.
  @exponential_retry_maximum_attempts = Float::INFINITY
  @exponential_retry_maximum_retry_interval_seconds = -1
  @exponential_retry_retry_expiration_seconds = -1
  @exponential_retry_backoff_coefficient = 2.0
  @exponential_retry_initial_retry_interval = 2
  @should_jitter = true
  @exponential_retry_exceptions_to_exclude = []
  @exponential_retry_exceptions_to_include = [Exception]

  # Computes the delay, in seconds, before the next retry of a failed task
  # using exponential backoff, capped by the options' maximum interval.
  # Returns -1 when the retry would fire after the expiration interval.
  #
  # first           - Time the task was first scheduled.
  # time_of_failure - Time of the current failure, or nil.
  # attempts        - Hash whose values are per-exception attempt counts.
  # options         - ExponentialRetryOptions describing the retry policy.
  @exponential_retry_function = lambda do |first, time_of_failure, attempts, options|

    raise ArgumentError, "first should be an instance of Time" unless first.instance_of?(Time)
    raise ArgumentError, "time_of_failure should be nil or an instance of Time" unless time_of_failure.nil? || time_of_failure.instance_of?(Time)
    raise ArgumentError, "number of attempts should be non-negative" if attempts.values.any? { |x| x < 0 }
    raise ArgumentError, "number of attempts should be at least 2" if attempts.values.reduce(0, :+) < 2
    raise ArgumentError, "user options must be of type ExponentialRetryOptions" unless options.is_a?(ExponentialRetryOptions)

    # get values from options
    initial_interval = options.initial_retry_interval
    backoff = options.backoff_coefficient
    max_interval = options.maximum_retry_interval_seconds
    retry_expiration = options.retry_expiration_interval_seconds

    # backoff delay: initial_interval * backoff^(total_attempts - 2)
    result = initial_interval * (backoff ** (attempts.values.reduce(0, :+) - 2))

    # cap the delay at maximum_retry_interval_seconds when one is set
    result = max_interval if !max_interval.nil? && max_interval != INFINITY && result > max_interval

    # how much time has elapsed since the first time this task was scheduled
    time_elapsed = time_of_failure.nil? ? 0 : (time_of_failure - first).to_i

    # return -1 if the retry would land past the expiration interval
    result = -1 if !retry_expiration.nil? &&
                   retry_expiration != INFINITY &&
                   (result + time_elapsed) >= retry_expiration

    result.to_i
  end

  # Deterministic jitter: seeds a PRNG from +seed+ (replay-safe) and draws an
  # integer in [0, max_value). max_value must be greater than zero.
  @jitter_function = lambda do |seed, max_value|
    raise ArgumentError, "max_value should be greater than 0" unless max_value > 0
    Random.new(seed.to_i).rand(max_value)
  end

  # Selects the data converter to use. By default, YAMLDataConverter is
  # used. S3DataConverter is used when the AWS_SWF_BUCKET_NAME environment
  # variable is set.
  def self.data_converter
    return self.default_data_converter unless ENV['AWS_SWF_BUCKET_NAME']
    S3DataConverter.converter
  end

  @default_data_converter = YAMLDataConverter.new
  # Sentinel string for activity/workflow options: use the same task list
  # the associated worker is polling on.
  @use_worker_task_list = "USE_WORKER_TASK_LIST"
end

Class Method Details

.data_converter ⇒ Object

Selects the data converter to use. By default, YAMLDataConverter is used. S3DataConverter is used when AWS_SWF_BUCKET_NAME environment variable is set.



185
186
187
188
# File 'lib/aws/decider/flow_defaults.rb', line 185

# Selects the converter used for workflow data: S3DataConverter when the
# AWS_SWF_BUCKET_NAME environment variable is set, otherwise the default
# (YAML) converter.
def self.data_converter
  ENV['AWS_SWF_BUCKET_NAME'] ? S3DataConverter.converter : self.default_data_converter
end

.defaults ⇒ Object



117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
# File 'lib/aws/decider/flow_defaults.rb', line 117

# Default workflow-starter options: domain, workflow identity, task list,
# timeouts, retry policy, and the activity used to report results.
def self.defaults
  options = {
    domain: "FlowDefault",
    prefix_name: "FlowDefaultWorkflowRuby",
    execution_method: "start",
    version: "1.0",
    # one hour, expressed in seconds
    execution_start_to_close_timeout: "3600",
    data_converter: self.data_converter,
    schedule_to_start_timeout: 60,
    start_to_close_timeout: 60,
    retry_policy: { maximum_attempts: 3 },
    task_list: "flow_default_ruby",
    result_activity_prefix: "FlowDefaultResultActivityRuby",
    result_activity_version: "1.0",
    result_activity_method: "run"
  }
  options
end