Class: HybridPlatformsConductor::Deployer
- Inherits: Object
- Includes: LoggerHelpers
- Defined in: lib/hybrid_platforms_conductor/deployer.rb
Overview
Gives ways to deploy on several nodes
Defined Under Namespace
Modules: ConfigDSLExtension
Constant Summary
- PACKAGING_FUTEX_FILE = "#{Dir.tmpdir}/hpc_packaging"
  String: File used as a Futex for packaging
Constants included from LoggerHelpers
LoggerHelpers::LEVELS_MODIFIERS, LoggerHelpers::LEVELS_TO_STDERR
Instance Attribute Summary
- #concurrent_execution ⇒ Object
  Concurrent execution of the deployment? [default = false] (Boolean)
- #local_environment ⇒ Object
  Are we deploying in a local environment? (Boolean)
- #nbr_retries_on_error ⇒ Object
  Number of retries to do in case of non-deterministic errors during deployment (Integer)
- #secrets ⇒ Object
  The list of JSON secrets (Array<Hash>)
- #timeout ⇒ Object
  Timeout (in seconds) to be used for each deployment, or nil for no timeout [default = nil] (Integer or nil)
- #use_why_run ⇒ Object
  Do we use why-run mode while deploying? [default = false] (Boolean)
Instance Method Summary
- #deploy_on(*nodes_selectors) ⇒ Object
  Deploy on a given list of nodes selectors.
- #deployment_info_from(*nodes) ⇒ Object
  Get deployment info from a list of nodes selectors.
- #initialize(logger: Logger.new(STDOUT), logger_stderr: Logger.new(STDERR), config: Config.new, cmd_runner: CmdRunner.new, nodes_handler: NodesHandler.new, actions_executor: ActionsExecutor.new, services_handler: ServicesHandler.new) ⇒ Deployer (constructor)
  Constructor.
- #options_parse(options_parser, parallel_switch: true, why_run_switch: false, timeout_options: true) ⇒ Object
  Complete an option parser with options meant to control this Deployer.
- #parse_deploy_output(node, stdout, stderr) ⇒ Object
  Parse stdout and stderr of a given deploy run and get the list of tasks with their status.
- #validate_params ⇒ Object
  Validate that parsed parameters are valid.
- #with_test_provisioned_instance(provisioner_id, node, environment:, reuse_instance: false) ⇒ Object
  Provision a test instance for a given node.
Methods included from LoggerHelpers
#err, #init_loggers, #log_component=, #log_debug?, #log_level=, #out, #section, #set_loggers_format, #stderr_device, #stderr_device=, #stderr_displayed?, #stdout_device, #stdout_device=, #stdout_displayed?, #stdouts_to_s, #with_progress_bar
Constructor Details
#initialize(logger: Logger.new(STDOUT), logger_stderr: Logger.new(STDERR), config: Config.new, cmd_runner: CmdRunner.new, nodes_handler: NodesHandler.new, actions_executor: ActionsExecutor.new, services_handler: ServicesHandler.new) ⇒ Deployer
Constructor
- Parameters:
  - logger (Logger): Logger to be used [default: Logger.new(STDOUT)]
  - logger_stderr (Logger): Logger to be used for stderr [default: Logger.new(STDERR)]
  - config (Config): Config to be used [default: Config.new]
  - cmd_runner (CmdRunner): Command executor to be used [default: CmdRunner.new]
  - nodes_handler (NodesHandler): Nodes handler to be used [default: NodesHandler.new]
  - actions_executor (ActionsExecutor): Actions Executor to be used [default: ActionsExecutor.new]
  - services_handler (ServicesHandler): Services Handler to be used [default: ServicesHandler.new]
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 80

def initialize(
  logger: Logger.new(STDOUT),
  logger_stderr: Logger.new(STDERR),
  config: Config.new,
  cmd_runner: CmdRunner.new,
  nodes_handler: NodesHandler.new,
  actions_executor: ActionsExecutor.new,
  services_handler: ServicesHandler.new
)
  init_loggers(logger, logger_stderr)
  @config = config
  @cmd_runner = cmd_runner
  @nodes_handler = nodes_handler
  @actions_executor = actions_executor
  @services_handler = services_handler
  @secrets = []
  @provisioners = Plugins.new(:provisioner, logger: @logger, logger_stderr: @logger_stderr)
  # Default values
  @use_why_run = false
  @timeout = nil
  @concurrent_execution = false
  @local_environment = false
  @nbr_retries_on_error = 0
end
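As an illustrative sketch (not part of the generated API reference), the following shows how a Deployer is typically instantiated; all keyword arguments are optional, and the stderr log file name is an arbitrary example.

require 'logger'
require 'hybrid_platforms_conductor/deployer'

# Omitted keyword arguments default to freshly built components
# (Config.new, CmdRunner.new, NodesHandler.new...).
deployer = HybridPlatformsConductor::Deployer.new

# Loggers can be swapped, for example to keep stderr in a file (file name is arbitrary).
verbose_deployer = HybridPlatformsConductor::Deployer.new(
  logger: Logger.new(STDOUT),
  logger_stderr: Logger.new('deployer_stderr.log')
)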
Instance Attribute Details
#concurrent_execution ⇒ Object
Concurrent execution of the deployment? [default = false]
Boolean
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 56

def concurrent_execution
  @concurrent_execution
end
#local_environment ⇒ Object
Are we deploying in a local environment?
Boolean
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 64

def local_environment
  @local_environment
end
#nbr_retries_on_error ⇒ Object
Number of retries to do in case of non-deterministic errors during deployment
Integer
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 68

def nbr_retries_on_error
  @nbr_retries_on_error
end
#secrets ⇒ Object
The list of JSON secrets
Array<Hash>
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 60

def secrets
  @secrets
end
#timeout ⇒ Object
Timeout (in seconds) to be used for each deployment, or nil for no timeout [default = nil]
Integer or nil
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 52

def timeout
  @timeout
end
#use_why_run ⇒ Object
Do we use why-run mode while deploying? [default = false]
Boolean
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 48

def use_why_run
  @use_why_run
end
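To illustrate how these attributes combine, here is a hedged sketch of configuring a Deployer for a parallel dry run; the secrets file name is hypothetical.

require 'json'

deployer = HybridPlatformsConductor::Deployer.new
deployer.use_why_run = true           # Dry run: report what would change without applying it
deployer.concurrent_execution = true  # Check all selected nodes in parallel
deployer.timeout = 300                # Abort a node's check after 5 minutes (why-run only)
deployer.nbr_retries_on_error = 2     # Retry nodes hitting non-deterministic errors twice
deployer.secrets = [JSON.parse(File.read('secrets.json'))]  # Hypothetical secrets file
deployer.validate_params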
Instance Method Details
#deploy_on(*nodes_selectors) ⇒ Object
Deploy on a given list of nodes selectors. The workflow is the following:
- Package the services to be deployed, considering the nodes, services and context (options, secrets, environment…)
- Deploy on the nodes (once per node to be deployed)
- Save deployment logs (in case of a real deployment)
- Parameters:
  - nodes_selectors (Array<Object>): The list of nodes selectors we will deploy to.
- Result:
  - Hash<String, [Integer or Symbol, String, String]>: Exit status code (or Symbol in case of error or dry run), standard output and standard error for each node that has been deployed.
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 171

def deploy_on(*nodes_selectors)
  # Get the sorted list of services to be deployed, per node
  # Hash<String, Array<String> >
  services_to_deploy = Hash[@nodes_handler.select_nodes(nodes_selectors.flatten).map do |node|
    [node, @nodes_handler.get_services_of(node)]
  end]
  # Get the secrets to be deployed
  secrets = {}
  @secrets.each do |secret_json|
    secrets.merge!(secret_json) do |key, value1, value2|
      raise "Secret #{key} has conflicting values between different secret JSON files." if value1 != value2
      value1
    end
  end
  # Check that we are allowed to deploy
  unless @use_why_run
    reason_for_interdiction = @services_handler.deploy_allowed?(
      services: services_to_deploy,
      secrets: secrets,
      local_environment: @local_environment
    )
    raise "Deployment not allowed: #{reason_for_interdiction}" unless reason_for_interdiction.nil?
  end
  # Package the deployment
  # Protect packaging by a Futex
  Futex.new(PACKAGING_FUTEX_FILE, timeout: @config.packaging_timeout_secs).open do
    section 'Packaging deployment' do
      @services_handler.package(
        services: services_to_deploy,
        secrets: secrets,
        local_environment: @local_environment
      )
    end
  end
  # Prepare the deployment as a whole, before getting individual deployment actions.
  # Do this after packaging, this way we ensure that services packaging cannot depend on the way deployment will be performed.
  @services_handler.prepare_for_deploy(
    services: services_to_deploy,
    secrets: secrets,
    local_environment: @local_environment,
    why_run: @use_why_run
  )
  # Launch deployment processes
  results = {}
  section "#{@use_why_run ? 'Checking' : 'Deploying'} on #{services_to_deploy.keys.size} nodes" do
    # Prepare all the control masters here, as they will be reused for the whole process, including mutexes, deployment and logs saving
    @actions_executor.with_connections_prepared_to(services_to_deploy.keys, no_exception: true) do
      nbr_retries = @nbr_retries_on_error
      remaining_nodes_to_deploy = services_to_deploy.keys
      while nbr_retries >= 0 && !remaining_nodes_to_deploy.empty?
        last_deploy_results = deploy(services_to_deploy.slice(*remaining_nodes_to_deploy))
        if nbr_retries > 0
          # Check if we need to retry deployment on some nodes
          # Only parse the last deployment attempt logs
          retriable_nodes = Hash[
            remaining_nodes_to_deploy.
              map do |node|
                exit_status, stdout, stderr = last_deploy_results[node]
                if exit_status == 0
                  nil
                else
                  retriable_errors = retriable_errors_from(node, exit_status, stdout, stderr)
                  if retriable_errors.empty?
                    nil
                  else
                    # Log the issue in the stderr of the deployment
                    stderr << "!!! #{retriable_errors.size} retriable errors detected in this deployment:\n#{retriable_errors.map { |error| "* #{error}" }.join("\n")}\n"
                    [node, retriable_errors]
                  end
                end
              end.
              compact
          ]
          unless retriable_nodes.empty?
            log_warn <<~EOS.strip
              Retry deployment for #{retriable_nodes.size} nodes as they got non-deterministic errors (#{nbr_retries} retries remaining):
              #{retriable_nodes.map { |node, retriable_errors| "  * #{node}:\n#{retriable_errors.map { |error| "    - #{error}" }.join("\n")}" }.join("\n")}
            EOS
          end
          remaining_nodes_to_deploy = retriable_nodes.keys
        end
        # Merge deployment results
        results.merge!(last_deploy_results) do |node, (exit_status_1, stdout_1, stderr_1), (exit_status_2, stdout_2, stderr_2)|
          [
            exit_status_2,
            <<~EOS,
              #{stdout_1}
              Deployment exit status code: #{exit_status_1}
              !!! Retry deployment due to non-deterministic error (#{nbr_retries} remaining attempts)...
              #{stdout_2}
            EOS
            <<~EOS
              #{stderr_1}
              !!! Retry deployment due to non-deterministic error (#{nbr_retries} remaining attempts)...
              #{stderr_2}
            EOS
          ]
        end
        nbr_retries -= 1
      end
    end
  end
  results
end
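Below is an illustrative usage sketch of #deploy_on, reusing a deployer built as in the constructor example; the node names are hypothetical and the result handling simply mirrors the documented result structure.

# Nodes selectors can be node names, lists of names, or any selector supported by NodesHandler.
results = deployer.deploy_on('web_node_1', %w[app_node_1 app_node_2])
results.each do |node, (exit_status, _stdout, stderr)|
  if exit_status == 0
    puts "#{node}: deployed successfully"
  else
    warn "#{node}: deployment failed (#{exit_status})\n#{stderr}"
  end
end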
#deployment_info_from(*nodes) ⇒ Object
Get deployment info from a list of nodes selectors
- Parameters:
  - nodes (Array<String>): Nodes to get info from
- Result:
  - Hash<String, Hash<Symbol,Object>>: The deployment info, per node name. Properties are defined by the Deployer#save_logs method; in addition to them, the following property can be set:
    - error (String): Optional property set in case of error
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 347

def deployment_info_from(*nodes)
  @actions_executor.max_threads = 64
  Hash[@actions_executor.
    execute_actions(
      Hash[nodes.flatten.map do |node|
        [
          node,
          { remote_bash: "cd /var/log/deployments && ls -t | head -1 | xargs sed '/===== STDOUT =====/q'" }
        ]
      end],
      log_to_stdout: false,
      concurrent: true,
      timeout: 10,
      progress_name: 'Getting deployment info'
    ).
    map do |node, (exit_status, stdout, stderr)|
      # Expected format for stdout:
      # Property1: Value1
      # ...
      # PropertyN: ValueN
      # ===== STDOUT =====
      # ...
      deploy_info = {}
      if exit_status.is_a?(Symbol)
        deploy_info[:error] = "Error: #{exit_status}\n#{stderr}"
      else
        stdout_lines = stdout.split("\n")
        if stdout_lines.first =~ /No such file or directory/
          deploy_info[:error] = '/var/log/deployments missing'
        else
          stdout_lines.each do |line|
            if line =~ /^([^:]+): (.+)$/
              key_str, value = $1, $2
              key = key_str.to_sym
              # Type-cast some values
              case key_str
              when 'date'
                # Date and time values
                # Thu Nov 23 18:43:01 UTC 2017
                deploy_info[key] = Time.parse(value)
              when 'debug'
                # Boolean values
                # Yes
                deploy_info[key] = (value == 'Yes')
              when /^diff_files_.+$/, 'services'
                # Array of strings
                # my_file.txt, other_file.txt
                deploy_info[key] = value.split(', ')
              else
                deploy_info[key] = value
              end
            else
              deploy_info[:unknown_lines] = [] unless deploy_info.key?(:unknown_lines)
              deploy_info[:unknown_lines] << line
            end
          end
        end
      end
      [
        node,
        deploy_info
      ]
    end
  ]
end
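An illustrative sketch of calling #deployment_info_from; the node names are hypothetical, and the :services and :date properties are assumed to be present in the node's deployment logs, as the type-casting above suggests.

deployer.deployment_info_from('web_node_1', 'app_node_1').each do |node, info|
  if info.key?(:error)
    warn "#{node}: #{info[:error]}"
  else
    # :services and :date are assumptions about what save_logs recorded on the node.
    puts "#{node}: services #{(info[:services] || []).join(', ')} deployed on #{info[:date]}"
  end
end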
#options_parse(options_parser, parallel_switch: true, why_run_switch: false, timeout_options: true) ⇒ Object
Complete an option parser with options meant to control this Deployer
- Parameters:
  - options_parser (OptionParser): The option parser to complete
  - parallel_switch (Boolean): Do we allow parallel execution to be switched? [default = true]
  - why_run_switch (Boolean): Do we allow the why-run mode to be switched? [default = false]
  - timeout_options (Boolean): Do we allow timeout options? [default = true]
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 112

def options_parse(options_parser, parallel_switch: true, why_run_switch: false, timeout_options: true)
  options_parser.separator ''
  options_parser.separator 'Deployer options:'
  options_parser.on(
    '-e', '--secrets SECRETS_LOCATION',
    'Specify a secrets location. Can be specified several times. Location can be:',
    '* Local path to a JSON file',
    '* URL of the form http[s]://<url>:<secret_id> to get a secret JSON file from a Thycotic Secret Server at the given URL.'
  ) do |secrets_location|
    @secrets << JSON.parse(
      if secrets_location =~ /^(https?:\/\/.+):(\d+)$/
        url = $1
        secret_id = $2
        secret = nil
        Thycotic.with_thycotic(url, @logger, @logger_stderr) do |thycotic|
          secret_file_item_id = thycotic.get_secret(secret_id).dig(:secret, :items, :secret_item, :id)
          raise "Unable to fetch secret file ID #{secrets_location}" if secret_file_item_id.nil?
          secret = thycotic.download_file_attachment_by_item_id(secret_id, secret_file_item_id)
          raise "Unable to fetch secret file attachment from #{secrets_location}" if secret.nil?
        end
        secret
      else
        raise "Missing secret file: #{secrets_location}" unless File.exist?(secrets_location)
        File.read(secrets_location)
      end
    )
  end
  options_parser.on('-p', '--parallel', 'Execute the commands in parallel (put the standard output in files <hybrid-platforms-dir>/run_logs/*.stdout)') do
    @concurrent_execution = true
  end if parallel_switch
  options_parser.on('-t', '--timeout SECS', "Timeout in seconds to wait for each chef run. Only used in why-run mode. (defaults to #{@timeout.nil? ? 'no timeout' : @timeout})") do |nbr_secs|
    @timeout = nbr_secs.to_i
  end if timeout_options
  options_parser.on('-W', '--why-run', 'Use the why-run mode to see what would be the result of the deploy instead of deploying it for real.') do
    @use_why_run = true
  end if why_run_switch
  options_parser.on('--retries-on-error NBR', "Number of retries in case of non-deterministic errors (defaults to #{@nbr_retries_on_error})") do |nbr_retries|
    @nbr_retries_on_error = nbr_retries.to_i
  end
end
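An illustrative sketch of wiring #options_parse into a standard OptionParser and validating the parsed parameters; this is not taken from the library itself.

require 'optparse'

deployer = HybridPlatformsConductor::Deployer.new
options_parser = OptionParser.new
deployer.options_parse(options_parser, why_run_switch: true)
options_parser.parse!
deployer.validate_params  # Raises if --timeout was given without --why-run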
#parse_deploy_output(node, stdout, stderr) ⇒ Object
Parse stdout and stderr of a given deploy run and get the list of tasks with their status
- Parameters:
  - node (String): Node for which this deploy run has been done.
  - stdout (String): stdout to be parsed.
  - stderr (String): stderr to be parsed.
- Result:
  - Array< Hash<Symbol,Object> >: List of task properties. The following properties should be returned, among free ones:
    - name (String): Task name
    - status (Symbol): Task status. Should be one of:
      - :changed: The task has been changed
      - :identical: The task has not been changed
    - diffs (String): Differences, if any
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 426

def parse_deploy_output(node, stdout, stderr)
  @services_handler.parse_deploy_output(stdout, stderr).map { |deploy_info| deploy_info[:tasks] }.flatten
end
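An illustrative sketch combining #deploy_on in why-run mode with #parse_deploy_output to list the tasks that would change; the node name is hypothetical.

deployer.use_why_run = true
deployer.deploy_on('web_node_1').each do |node, (_exit_status, stdout, stderr)|
  deployer.parse_deploy_output(node, stdout, stderr).each do |task|
    puts "#{node} / #{task[:name]}: #{task[:status]}" if task[:status] == :changed
  end
end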
#validate_params ⇒ Object
Validate that parsed parameters are valid
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 154

def validate_params
  raise 'Can\'t have a timeout unless why-run mode. Please don\'t use --timeout without --why-run.' if !@timeout.nil? && !@use_why_run
end
#with_test_provisioned_instance(provisioner_id, node, environment:, reuse_instance: false) ⇒ Object
Provision a test instance for a given node.
- Parameters:
  - provisioner_id (Symbol): The provisioner ID to be used
  - node (String): The node for which we want the image
  - environment (String): An ID to differentiate different running instances for the same node
  - reuse_instance (Boolean): Do we reuse an eventual existing instance? [default: false]
  - Proc: Code called when the container is ready. The container will be stopped at the end of execution.
    - Parameters:
      - deployer (Deployer): A new Deployer configured to override access to the node through the Docker container
      - instance (Provisioner): The provisioned instance
# File 'lib/hybrid_platforms_conductor/deployer.rb', line 295

def with_test_provisioned_instance(provisioner_id, node, environment:, reuse_instance: false)
  # Add the user to the environment to better track belongings on shared provisioners
  environment = "#{@cmd_runner.whoami}_#{environment}"
  # Add PID, TID and a random number to the ID to make sure other containers used by other runs are not being reused.
  environment << "_#{Process.pid}_#{Thread.current.object_id}_#{SecureRandom.hex(8)}" unless reuse_instance
  # Create different NodesHandler and Deployer to handle this Docker container in place of the real node.
  sub_logger, sub_logger_stderr =
    if log_debug?
      [@logger, @logger_stderr]
    else
      [Logger.new(StringIO.new, level: :info), Logger.new(StringIO.new, level: :info)]
    end
  begin
    sub_executable = Executable.new(logger: sub_logger, logger_stderr: sub_logger_stderr)
    instance = @provisioners[provisioner_id].new(
      node,
      environment: environment,
      logger: @logger,
      logger_stderr: @logger_stderr,
      config: @config,
      cmd_runner: @cmd_runner,
      # Here we use the NodesHandler that will be bound to the sub-Deployer only, as the node's metadata might be modified by the Provisioner.
      nodes_handler: sub_executable.nodes_handler,
      actions_executor: @actions_executor
    )
    instance.with_running_instance(stop_on_exit: true, destroy_on_exit: !reuse_instance, port: 22) do
      actions_executor = sub_executable.actions_executor
      deployer = sub_executable.deployer
      # Setup test environment for this container
      actions_executor.connector(:ssh).ssh_user = 'root'
      actions_executor.connector(:ssh).passwords[node] = 'root_pwd'
      deployer.local_environment = true
      # Ignore secrets that might have been given: in Docker containers we always use dummy secrets
      deployer.secrets = [JSON.parse(File.read("#{@config.hybrid_platforms_dir}/dummy_secrets.json"))]
      yield deployer, instance
    end
  rescue
    # Make sure Docker logs are being output to better investigate errors if we were not already outputing them in debug mode
    stdouts = sub_executable.stdouts_to_s
    log_error "[ #{node}/#{environment} ] - Encountered unhandled exception #{$!}\n#{$!.backtrace.join("\n")}\n-----\n#{stdouts}" unless stdouts.nil?
    raise
  end
end
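An illustrative sketch of #with_test_provisioned_instance; the :docker provisioner ID, node name and environment ID are assumptions, and the block simply runs a check on the provisioned test instance.

# Assumes a Docker-based provisioner plugin is registered under :docker.
deployer.with_test_provisioned_instance(:docker, 'web_node_1', environment: 'local_test') do |sub_deployer, instance|
  sub_deployer.use_why_run = true
  exit_status, _stdout, _stderr = sub_deployer.deploy_on('web_node_1')['web_node_1']
  puts "Check run on test instance exited with #{exit_status}"
end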