Class: Backarch::Snapshot::Elasticsearch

Inherits:
Backarch::Snapshot
Defined in:
lib/backarch/snapshot/elasticsearch.rb

Instance Method Summary

Methods inherited from Backarch::Snapshot

run, #snapshot_destination, #snapshot_name

Instance Method Details

#delete ⇒ Object



# File 'lib/backarch/snapshot/elasticsearch.rb', line 56

def delete
  # NOOP
end

#es_index_flush(index) ⇒ Object



# File 'lib/backarch/snapshot/elasticsearch.rb', line 60

def es_index_flush(index)
  begin
    AWS::SimpleDB.consistent_reads do
      begin
        sdb = AWS::SimpleDB.new
        domain = sdb.domains.create(Config.config["aws"]["path"].gsub('/','-'))
        lock = domain.items["#{index}-lock"]

        LOG.info("Current lock before flush: #{lock.attributes['owner'].values}")
        unless lock.attributes['owner'].values.count > 0 ## Lock already taken
          LOG.info "Flushing index #{index}"
          `curl -s -XPOST "http://localhost:9200/#{index}/_flush"`

          LOG.info "Disabling flush of transaction logs for #{index}"
          `curl -s -XPUT "http://localhost:9200/#{index}/_settings?pretty=true" -d '{"index":{"translog":{"disable_flush":"true"}}}'`
        end
        lock.attributes['owner'].add(NODE_NAME)
        LOG.info("Current lock after flush: #{lock.attributes['owner'].values}")

        yield
      ensure
        lock.attributes['owner'].delete(NODE_NAME) rescue nil
        owners = lock.attributes['owner'].values rescue []
        LOG.info("Current lock before renable flush: #{owners}")

        unless owners.count > 0 ## Lock still in use. Don't enable flush yet
          LOG.info "Enabling flush of transaction logs for #{index}"
          `curl -s -XPUT "http://localhost:9200/#{index}/_settings?pretty=true" -d '{"index":{"translog":{"disable_flush":"false"}}}'`
          lock.delete if lock
        end
      end
    end
  rescue AWS::SimpleDB::Errors::ServiceUnavailable
    LOG.info "Caught AWS::SimpleDB::Errors::ServiceUnavailable error.  Retrying in 5 seconds"
    retry
  end
end
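
The lock/flush wrapper is only called from #request inside this class, but it can be exercised on its own. A minimal usage sketch, assuming the surrounding Backarch tooling has already set up Config, LOG and NODE_NAME; the index name and block body are hypothetical:

snapshot = Backarch::Snapshot::Elasticsearch.new

# Flush the hypothetical index "logs-2014.01.01", disable translog flushing, and
# register this node as a lock owner in SimpleDB while the block runs; flushing
# is re-enabled afterwards once no other node still holds the lock.
snapshot.es_index_flush("logs-2014.01.01") do
  puts "index files are quiescent here - safe to copy or archive them"
end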

#request ⇒ Object



# File 'lib/backarch/snapshot/elasticsearch.rb', line 6

def request
  snapshot_destination.pass do |dir|
    ::FileUtils.mkdir_p dir
    ::FileUtils.chmod 0700, dir
    ::FileUtils.chown 'root', 'root', dir
  end

  depth = Config.input.split('/').count

  File.open("#{snapshot_destination}/cluster_state.local.json", "w") { |f| f.write `curl -s XGET "http://localhost:9200/_cluster/state?pretty=true&local=true"` }
  File.open("#{snapshot_destination}/cluster_state.global.json", "w") { |f| f.write `curl -s XGET "http://localhost:9200/_cluster/state?pretty=true"` }

  # now lets tar up our data files. these are huge, so lets be nice
  indexes = `find #{Config.input} -mindepth #{depth} -maxdepth #{depth} -type d -wholename '*/indices/*' | sort`.split
  indexes.each do |idx|
    index_name = idx.split('/').last

    LOG.info("Beginning snapshot of index #{index_name}")

    es_index_flush(index_name) do
      settings = `curl -s -XGET "http://localhost:9200/#{index_name}/_settings?pretty=true" 2>/dev/null | sed '1,2d' | sed '$d' | sed '$d'`.chomp
      mappings = `curl -s -XGET "http://localhost:9200/#{index_name}/_mapping?pretty=true" 2>/dev/null | sed '1,2d' | sed '$d' | sed '$d'`.chomp

      # time to create our restore script! oh god scripts creating scripts, this never ends well…
      File.open("#{snapshot_destination}/#{index_name}.restore.sh", 'w') do |file|
        file.write(<<-SCRIPT)
#!/bin/bash
# this script requires #{index_name}.tar.gz and will restore it into elasticsearch
# it is ESSENTIAL that the index you are restoring does NOT exist in ES. delete it
# if it does BEFORE trying to restore data.

curl -s -XPUT 'http://localhost:9200/#{index_name}' -d '{#{settings},"mappings":{#{mappings}}}'
echo

# extract our data files into place
echo "Restoring index (this may take a while)..."
tar xvf #{index_name}.tar.gz -C #{Config.input} --strip-components=#{depth}
echo
        SCRIPT
      end

      command = "nice -n 19 tar czf #{snapshot_destination}/#{index_name}.tar.gz -C #{Config.input} #{idx}"
      LOG.info("Creating archive: #{command}")
      `#{command}`
    end
  end
end
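
In normal use the inherited run entry point drives the snapshot, but #request can also be invoked directly. A rough sketch, assuming Config (notably Config.input and the aws path), LOG and NODE_NAME have been set up elsewhere by Backarch:

snapshot = Backarch::Snapshot::Elasticsearch.new

# Dumps the local and global cluster state JSON, then flushes and tars each index
# directory found under Config.input into snapshot_destination, writing an
# <index>.restore.sh alongside every <index>.tar.gz.
snapshot.request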

#sync ⇒ Object



# File 'lib/backarch/snapshot/elasticsearch.rb', line 52

def sync
  # NOOP
end