Class: Configgin

Inherits:
Object
  • Object
show all
Defined in:
lib/configgin.rb,
lib/configgin/version.rb

Overview

Configgin is the main class which puts all the pieces together and configures the container according to the options.

Constant Summary collapse

SVC_ACC_PATH =

SVC_ACC_PATH is the location of the service account secrets

'/var/run/secrets/kubernetes.io/serviceaccount'.freeze
VERSION =
'0.20.3'.freeze

Instance Method Summary collapse

Constructor Details

#initialize(jobs:, env2conf:, bosh_deployment_manifest:, self_name: ENV['HOSTNAME']) ⇒ Configgin

Returns a new instance of Configgin.



18
19
20
21
22
23
# File 'lib/configgin.rb', line 18

def initialize(jobs:, env2conf:, bosh_deployment_manifest:, self_name: ENV['HOSTNAME'])
  @job_configs = JSON.parse(File.read(jobs))
  @templates = YAML.load_file(env2conf)
  @bosh_deployment_manifest = bosh_deployment_manifest
  @self_name = self_name
end

Instance Method Details

#expected_annotations(job_configs, job_digests) ⇒ Object

Given the active jobs, and a hash of the expected annotations for each, return the annotations we expect to be on each pod based on what properties each job imports.



175
176
177
178
179
180
181
182
183
184
185
186
187
# File 'lib/configgin.rb', line 175

# Given the active jobs, and a hash of the expected digests for each,
# return the annotations we expect to be on each pod, based on which
# instance groups consume properties exported by each job.
#
# @param job_configs [Hash] job name => config hash (with a 'base' spec path)
# @param job_digests [Hash] job name => digest of its exported properties
# @return [Hash] instance group name => { digest annotation key => digest }
def expected_annotations(job_configs, job_digests)
  # Auto-vivify one annotation hash per consuming instance group.
  expectations = Hash.new { |hash, group| hash[group] = {} }
  job_configs.each_pair do |job_name, job_config|
    spec = JSON.parse(File.read(job_config['base']))
    spec.fetch('consumed_by', {}).each_value do |consumer_jobs|
      consumer_jobs.each do |consumer_job|
        digest_key = "skiff-#{ENV['CONFIGGIN_VERSION_TAG']}-#{instance_group}-#{job_name}"
        expectations[consumer_job['role']][digest_key] = job_digests[job_name]
      end
    end
  end
  expectations
end

#export_job_properties(jobs) ⇒ Object

Write exported properties to secret and update annotations on importing stateful sets.



71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
# File 'lib/configgin.rb', line 71

# Write exported properties to a secret owned by our StatefulSet, and
# update pod-template annotations on importing stateful sets so their
# pods restart when the exported values change.
#
# @param jobs [Hash] job name => Job, as built by #generate_jobs
def export_job_properties(jobs)
  # Co-located containers don't get to export properties.
  return unless instance_group == ENV["KUBERNETES_CONTAINER_NAME"]
  # Jobs (errands) and unowned pods (tests) don't export properties.
  return unless self_pod['metadata']['ownerReferences']
  return unless self_pod['metadata']['ownerReferences'][0]['kind'] == "StatefulSet"

  sts = kube_client_stateful_set.get_stateful_set(instance_group, kube_namespace)

  # Make sure the secret attached to the stateful set exists.
  # NOTE(review): the ".metadata" receivers below were dropped in the
  # extracted source (e.g. "secret. ="); restored per the kubeclient API.
  secret = Kubeclient::Resource.new
  secret.metadata = {
    name: sts.metadata.name,
    namespace: kube_namespace,
    # Owning the secret via the stateful set means it is garbage-collected
    # together with the set.
    ownerReferences: [
      {
        apiVersion: sts.apiVersion,
        blockOwnerDeletion: false,
        controller: false,
        kind: sts.kind,
        name: sts.metadata.name,
        uid: sts.metadata.uid,
      }
    ]
  }
  begin
    kube_client.create_secret(secret)
  rescue
    # Best effort: the secret usually already exists from a previous run.
  end
  secret = kube_client.get_secret(instance_group, kube_namespace)
  secret.data ||= {}

  # version tag changes whenever the chart version or the secrets generation changes
  version_tag = ENV["CONFIGGIN_VERSION_TAG"]
  new_tag = !secret.data[version_tag]
  secret.data = {version_tag => ""} if new_tag # make sure old properties are deleted during upgrade

  digests = {}
  jobs.each do |name, job|
    secret.data["skiff-exported-properties-#{name}"] = Base64.strict_encode64(job.exported_properties.to_json)
    digests[name] = property_digest(job.exported_properties)

    # Record initial digest values whenever the tag changes, in which case the pod startup
    # order is already controlled by the "CONFIGGIN_IMPORT_#{role}" references to the new
    # tags in the corresponding secrets. There is no annotation when importing this set of
    # initial values because the helm chart doesn't include any annotations, and we don't
    # want to trigger a pod restart by adding them.
    encoded_digest = Base64.strict_encode64(digests[name])
    if new_tag
      secret.data["skiff-initial-digest-#{name}"] = encoded_digest
    end
    if secret.data["skiff-initial-digest-#{name}"] == encoded_digest
      digests[name] = nil
    end
  end
  kube_client.update_secret(secret)

  # Some pods might depend on the properties exported by this pod; add annotations
  # to the template spec of the stateful sets so that the pods will be restarted if
  # the exported values have changed from the initial values.
  # BUG FIX: the block parameter used to be named "digests", shadowing the
  # outer digests hash; renamed to group_digests.
  expected_annotations(@job_configs, digests).each_pair do |instance_group_name, group_digests|
    # Avoid restarting our own pod
    next if instance_group_name == instance_group

    begin
      sts = kube_client_stateful_set.get_stateful_set(instance_group_name, kube_namespace)
    rescue KubeException => e
      # BUG FIX: was "rescue JSON::ParseError" — that constant does not
      # exist (the real class is JSON::ParserError), so a malformed error
      # response would have raised NameError here.
      response = begin
        JSON.parse(e.response || '')
      rescue JSON::ParserError
        {}
      end
      if response['reason'] == 'NotFound'
        # The StatefulSet can be missing if we're configured to not have an optional instance group.
        warn "Skipping patch of non-existent StatefulSet #{instance_group_name}"
        next
      end
      warn "Error fetching stateful set #{instance_group_name}: #{response.to_json}"
      raise
    end

    # Update annotations to match digests for current property values. The stateful set will
    # only restart pods when the checksum of the pod spec changes, so no-op "updates" are ok.
    # The pod template may have no annotations at all (the chart doesn't set
    # any), so guard against nil before iterating.
    annotations = {}
    (sts.spec.template.metadata.annotations || {}).each_pair do |key, value|
      annotations[key] = value
    end
    group_digests.each_pair do |key, value|
      annotations[key] = value
    end

    kube_client_stateful_set.merge_patch_stateful_set(
      instance_group_name,
      { spec: { template: { metadata: { annotations: annotations } } } },
      kube_namespace
    )
  end
end

#generate_jobs(job_configs, templates) ⇒ Object



31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# File 'lib/configgin.rb', line 31

# Build a Job object for every configured job, transmogrifying each base
# BOSH spec with the environment-derived templates (and, when a BOSH
# deployment manifest was supplied, with its instance-group overrides).
#
# @param job_configs [Hash] job name => config hash (with a 'base' spec path)
# @param templates [Hash] environment variable to BOSH property mapping
# @return [Hash] job name => Job
def generate_jobs(job_configs, templates)
  jobs = {}
  job_configs.each do |job, job_config|
    base_config = JSON.parse(File.read(job_config['base']))

    begin
      bosh_spec = EnvironmentConfigTransmogrifier.transmogrify(base_config, templates, secrets: '/etc/secrets')

      if @bosh_deployment_manifest
        manifest = BoshDeploymentManifest.new(@bosh_deployment_manifest)
        bosh_spec = BoshDeploymentManifestConfigTransmogrifier.transmogrify(bosh_spec, instance_group, manifest)
      end
    rescue NonHashValueOverride => e
      STDERR.puts e.to_s
      # BUG FIX: the original message interpolated undefined locals
      # ("#{outfile} from #{infile}"), raising NameError inside the rescue
      # and masking the intended exit status.
      STDERR.puts "Error generating #{job} from #{job_config['base']}"
      exit 1
    end

    jobs[job] = Job.new(
      spec: bosh_spec,
      namespace: kube_namespace,
      client: kube_client,
      client_stateful_set: kube_client_stateful_set,
      self_name: @self_name
    )
  end
  jobs
end

#kube_namespaceObject



189
190
191
# File 'lib/configgin.rb', line 189

# The Kubernetes namespace this pod runs in, as provided by the mounted
# service-account volume. Memoized after the first read.
#
# @return [String] the namespace name
def kube_namespace
  @kube_namespace ||= File.read(File.join(SVC_ACC_PATH, 'namespace'))
end

#kube_tokenObject



193
194
195
# File 'lib/configgin.rb', line 193

# The service-account token used to talk to the Kubernetes API. An
# explicitly supplied CONFIGGIN_SA_TOKEN takes precedence over the token
# file from the mounted service-account volume. Memoized.
#
# @return [String] the bearer token
def kube_token
  @kube_token ||= ENV.fetch('CONFIGGIN_SA_TOKEN') { File.read("#{SVC_ACC_PATH}/token") }
end

#render_job_templates(jobs, job_configs) ⇒ Object



60
61
62
63
64
65
66
67
68
# File 'lib/configgin.rb', line 60

# Render every template file of every job, resolving BOSH links through
# a DNS encoder built from the job's link information.
#
# @param jobs [Hash] job name => Job
# @param job_configs [Hash] job name => config hash (with a 'files' map)
def render_job_templates(jobs, job_configs)
  jobs.each do |job_name, job|
    encoder = KubeDNSEncoder.new(job.spec['links'])
    file_map = job_configs[job_name]['files']
    file_map.each { |infile, outfile| job.generate(infile, outfile, encoder) }
  end
end

#runObject



25
26
27
28
29
# File 'lib/configgin.rb', line 25

# Entry point: build the jobs, export their properties (and annotate
# consuming stateful sets), then render all templates. Export happens
# before rendering so peer digests are in place first.
def run
  active_jobs = generate_jobs(@job_configs, @templates)
  export_job_properties(active_jobs)
  render_job_templates(active_jobs, @job_configs)
end