Module: Bosh::OpenStackCloud::Helpers

Included in:
AvailabilityZoneProvider, Cloud, Network, NetworkConfigurator
Defined in:
lib/cloud/openstack/helpers.rb

Constant Summary

DEFAULT_STATE_TIMEOUT = 300
  Default timeout for target state (in seconds)

MAX_RETRIES = 10
  Max number of retries

DEFAULT_RETRY_TIMEOUT = 3
  Default timeout before retrying a call (in seconds)

Instance Method Summary

Instance Method Details

#cloud_error(message, exception = nil) ⇒ Object

Raises a CloudError exception

Parameters:

  • message (String)

    Message about what went wrong

  • exception (Exception) (defaults to: nil)

    Exception to be logged (optional)

Raises:

  • (Bosh::Clouds::CloudError)


# File 'lib/cloud/openstack/helpers.rb', line 17

def cloud_error(message, exception = nil)
  @logger.error(message) if @logger
  @logger.error(exception) if @logger && exception
  raise Bosh::Clouds::CloudError, message
end
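
A minimal usage sketch: a class that includes this module can funnel failures through cloud_error so the message is logged before the CloudError is raised. The class, logger and server lookup below are illustrative and not part of the module.

require "logger"

class ExampleCloud
  include Bosh::OpenStackCloud::Helpers

  def initialize
    @logger = Logger.new(STDOUT)
  end

  # `compute` stands in for a Fog::Compute connection (illustrative).
  def find_server(compute, server_id)
    server = compute.servers.get(server_id)
    cloud_error("Server `#{server_id}' not found") if server.nil?
    server
  end
end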

#parse_openstack_response(response, *keys) ⇒ Hash

Parses an OpenStack API response and looks up the given keys in its body

Parameters:

  • response (Excon::Response)

    Response from OpenStack API

  • keys (Array<String>)

    Keys to look up in response

Returns:

  • (Hash)

    Contents of the first key found, or nil if none of the keys are present



# File 'lib/cloud/openstack/helpers.rb', line 73

def parse_openstack_response(response, *keys)
  unless response.body.empty?
    begin
      body = JSON.parse(response.body)
      key = keys.detect { |k| body.has_key?(k) }
      return body[key] if key
    rescue JSON::ParserError
      # Malformed JSON body; fall through and return nil
    end
  end
  nil
end
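
A minimal sketch of calling it against a rate-limit error body, assuming lib/cloud/openstack/helpers.rb has been required and the module is mixed in at the top level; the JSON payload below is fabricated for illustration, and real OpenStack responses vary by service and version.

require "excon"
require "json"

include Bosh::OpenStackCloud::Helpers

# Illustrative Excon::Response carrying an "overLimit" payload.
response = Excon::Response.new(
  :body => '{"overLimit": {"message": "Rate limited", "retryAfter": "5"}}'
)

overlimit = parse_openstack_response(response, "overLimit", "overLimitFault")
overlimit["retryAfter"] unless overlimit.nil?  # => "5"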

#task_checkpoint ⇒ Object



# File 'lib/cloud/openstack/helpers.rb', line 144

def task_checkpoint
  # Checkpoint hook so a long-running wait loop can be cancelled between
  # iterations.
  Bosh::Clouds::Config.task_checkpoint
end

#wait_resource(resource, target_state, state_method = :status, allow_notfound = false) ⇒ Object

Waits for a resource to reach one of the target states

Parameters:

  • resource (Fog::Model)

    Resource to query

  • target_state (Array<Symbol>)

    Desired state(s) for the resource

  • state_method (Symbol) (defaults to: :status)

    Method used to fetch the resource’s state

  • allow_notfound (Boolean) (defaults to: false)

    true if the resource may legitimately no longer exist (e.g. after a destroy operation)



# File 'lib/cloud/openstack/helpers.rb', line 93

def wait_resource(resource, target_state, state_method = :status, allow_notfound = false)

  started_at = Time.now
  desc = resource.class.name.split("::").last.to_s + " `" + resource.id.to_s + "'"
  target_state = Array(target_state)
  state_timeout = @state_timeout || DEFAULT_STATE_TIMEOUT

  loop do
    task_checkpoint

    duration = Time.now - started_at

    if duration > state_timeout
      cloud_error("Timed out waiting for #{desc} to be #{target_state.join(", ")}")
    end

    if @logger
      @logger.debug("Waiting for #{desc} to be #{target_state.join(", ")} (#{duration}s)")
    end

    # If resource reload is nil, perhaps it's because resource went away
    # (ie: a destroy operation). Don't raise an exception if this is
    # expected (allow_notfound).
    if with_openstack { resource.reload.nil? }
      break if allow_notfound
      cloud_error("#{desc}: Resource not found")
    else
      state = with_openstack { resource.send(state_method).downcase.to_sym }
    end

    # This is not a very strong convention, but some resources
    # have 'error', 'failed' and 'killed' states, we probably don't want to keep
    # waiting if we're in these states. Alternatively we could introduce a
    # set of 'loop breaker' states but that doesn't seem very helpful
    # at the moment
    if state == :error || state == :failed || state == :killed
      cloud_error("#{desc} state is #{state}, expected #{target_state.join(", ")}")
    end

    break if target_state.include?(state)

    sleep(@wait_resource_poll_interval)

  end

  if @logger
    total = Time.now - started_at
    @logger.info("#{desc} is now #{target_state.join(", ")}, took #{total}s")
  end
end
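
Two hedged usage sketches, assuming they run inside a CPI class that includes this module and sets @wait_resource_poll_interval; `server` and `volume` stand in for Fog models, and the state names are typical Nova/Cinder values used here for illustration.

# Wait for a freshly booted server to report an active state
# (compute servers expose it via #state rather than #status).
wait_resource(server, :active, :state)

# Wait for a volume to finish deleting; allow_notfound = true lets the loop
# end quietly once the volume no longer exists.
volume.destroy
wait_resource(volume, :deleted, :status, true)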

#with_openstack ⇒ Object



# File 'lib/cloud/openstack/helpers.rb', line 23

def with_openstack
  retries = 0
  begin
    yield

  rescue Excon::Errors::RequestEntityTooLarge => e
    # If we find a rate limit error, parse message, wait, and retry
    overlimit = parse_openstack_response(e.response, "overLimit", "overLimitFault")
    unless overlimit.nil? || retries >= MAX_RETRIES
      task_checkpoint
      wait_time = overlimit["retryAfter"] || e.response.headers["Retry-After"] || DEFAULT_RETRY_TIMEOUT
      details = "#{overlimit["message"]} - #{overlimit["details"]}"
      @logger.debug("OpenStack API Over Limit (#{details}), waiting #{wait_time} seconds before retrying") if @logger
      sleep(wait_time.to_i)
      retries += 1
      retry
    end
    cloud_error("OpenStack API Request Entity Too Large error. Check task debug log for details.", e)

  rescue Excon::Errors::ServiceUnavailable => e
    unless retries >= MAX_RETRIES
      retries += 1
      @logger.debug("OpenStack API Service Unavailable error, retrying (#{retries})") if @logger
      sleep(DEFAULT_RETRY_TIMEOUT)
      retry
    end
    cloud_error("OpenStack API Service Unavailable error. Check task debug log for details.", e)

  rescue Excon::Errors::BadRequest => e
    badrequest = parse_openstack_response(e.response, "badRequest")
    details = badrequest.nil? ? "" : " (#{badrequest["message"]})"
    cloud_error("OpenStack API Bad Request#{details}. Check task debug log for details.", e)

  rescue Excon::Errors::InternalServerError => e
    unless retries >= MAX_RETRIES
      retries += 1
      @logger.debug("OpenStack API Internal Server error, retrying (#{retries})") if @logger
      sleep(DEFAULT_RETRY_TIMEOUT)
      retry
    end
    cloud_error("OpenStack API Internal Server error. Check task debug log for details.", e)
  end
end
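
A minimal sketch of wrapping Fog calls so rate limits (413), 503s and 500s are retried and 400s surface through cloud_error; `compute` and `server_id` are illustrative and assumed to come from the including CPI class.

server = with_openstack { compute.servers.get(server_id) }
with_openstack { server.reboot } if server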