Class: Fluent::S3Output

Inherits:
TimeSlicedOutput
  • Object
show all
Includes:
Mixin::ConfigPlaceholders, SetTagKeyMixin, SetTimeKeyMixin
Defined in:
lib/fluent/plugin/out_s3.rb

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize ⇒ S3Output

Returns a new instance of S3Output.



12
13
14
15
16
17
18
19
20
21
# File 'lib/fluent/plugin/out_s3.rb', line 12

# Sets up the plugin instance: pulls in the libraries used at flush time
# and enables SSL for S3 connections by default (overridable via the
# `use_ssl` config key in #configure).
def initialize
  super
  # Lazy-loaded here (fluentd plugin convention) so merely listing the
  # plugin does not pay the load cost; order matches the original.
  %w(aws-sdk zlib time tempfile open3).each do |lib|
    require lib
  end

  @use_ssl = true
end

Instance Attribute Details

#bucket ⇒ Object (readonly)

Returns the value of attribute bucket.



43
44
45
# File 'lib/fluent/plugin/out_s3.rb', line 43

# Read-only accessor for the S3 bucket handle.
#
# @return [Object] the bucket proxy assigned in #start
#   (`@s3.buckets[@s3_bucket]`); nil before #start has run.
def bucket
  @bucket
end

Instance Method Details

#configure(conf) ⇒ Object



51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
# File 'lib/fluent/plugin/out_s3.rb', line 51

# Configures the plugin from its fluentd <match> section.
#
# Keys handled here (beyond those consumed by super):
#   format_json - presence of the key switches output to raw JSON lines
#   use_ssl     - 'true'/'false'; a bare key with no value means true
#   store_as    - selects compression: gzip, lzo (needs lzop in PATH),
#                 json, or plain text (default)
#
# @param conf [Fluent::Config::Element] parsed configuration element
# @raise [ConfigError] when use_ssl has an invalid value, or when
#   store_as is 'lzo' but the lzop binary is not installed
def configure(conf)
  super

  # Raw config values are strings (possibly empty) or nil, so mere
  # presence of the key enables JSON formatting. `!!` replaces the old
  # assignment-in-condition with an unused local.
  @format_json = !!conf['format_json']

  if use_ssl = conf['use_ssl']
    if use_ssl.empty?
      # Bare "use_ssl" with no value means enabled.
      @use_ssl = true
    else
      @use_ssl = Config.bool_value(use_ssl)
      if @use_ssl.nil?
        raise ConfigError, "'true' or 'false' is required for use_ssl option on s3 output"
      end
    end
  end

  @ext, @mime_type =
    case @store_as
    when 'gzip' then ['gz', 'application/x-gzip']
    when 'lzo'
      begin
        # Probe for the external lzop binary up front so a missing tool
        # fails at configure time rather than on the first flush.
        Open3.capture3('lzop -V')
      rescue Errno::ENOENT
        raise ConfigError, "'lzop' utility must be in PATH for LZO compression"
      end
      ['lzo', 'application/x-lzop']
    when 'json' then ['json', 'application/json']
    else ['txt', 'text/plain']
    end

  @timef = TimeFormatter.new(@time_format, @localtime)

  # Expands strftime directives in the configured path against either
  # local or UTC wall-clock time.
  @path_slicer =
    if @localtime
      Proc.new { |path| Time.now.strftime(path) }
    else
      Proc.new { |path| Time.now.utc.strftime(path) }
    end
end

#format(tag, time, record) ⇒ Object



116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
# File 'lib/fluent/plugin/out_s3.rb', line 116

# Serializes one event into the line written to the buffer chunk:
# either a bare JSON record (format_json) or the classic
# "time<TAB>tag<TAB>json" layout.
#
# @param tag [String] the event tag
# @param time [Integer] the event timestamp (epoch seconds)
# @param record [Hash] the event record (mutated in place when
#   include_tag_key / include_time_key are set)
# @return [String] one newline-terminated output line
def format(tag, time, record)
  # Render the timestamp only when it will actually appear in the output.
  time_str = @timef.format(time) if @include_time_key || !@format_json

  # Copied from each mixin because current TimeSlicedOutput can't
  # support mixins.
  record[@tag_key] = tag if @include_tag_key
  record[@time_key] = time_str if @include_time_key

  return Yajl.dump(record) + "\n" if @format_json

  "#{time_str}\t#{tag}\t#{Yajl.dump(record)}\n"
end

#placeholdersObject



47
48
49
# File 'lib/fluent/plugin/out_s3.rb', line 47

# Declares which placeholder syntaxes Mixin::ConfigPlaceholders should
# expand for this plugin.
#
# @return [Array<Symbol>] only %-style placeholders are supported
def placeholders
  %i[percent]
end

#startObject



98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
# File 'lib/fluent/plugin/out_s3.rb', line 98

# Opens the connection to S3 and resolves the target bucket, then
# validates it (creating the bucket / checking the API keys as
# configured).
def start
  super

  # Build the AWS::S3 client options; credentials are passed only when
  # both halves are configured (otherwise the SDK's own lookup applies).
  opts = { :use_ssl => @use_ssl }
  if @aws_key_id && @aws_sec_key
    opts[:access_key_id]     = @aws_key_id
    opts[:secret_access_key] = @aws_sec_key
  end
  opts[:s3_endpoint] = @s3_endpoint if @s3_endpoint
  opts[:proxy_uri]   = @proxy_uri   if @proxy_uri

  @s3 = AWS::S3.new(opts)
  @bucket = @s3.buckets[@s3_bucket]

  ensure_bucket
  check_apikeys if @check_apikey_on_start
end

#write(chunk) ⇒ Object



136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
# File 'lib/fluent/plugin/out_s3.rb', line 136

# Flushes one buffered chunk to S3.
#
# First derives a collision-free object key by expanding
# @s3_object_key_format (%{path}, %{time_slice}, %{file_extension},
# %{index}) and bumping the index until the key is unused, then stages
# the chunk into a temp file (gzip / lzo / raw per @store_as) and
# uploads it.
#
# @param chunk [Fluent::BufferChunk] the buffered data to upload
def write(chunk)
  i = 0

  # do-while loop: always builds at least one candidate key, and keeps
  # incrementing %{index} while the key already exists in the bucket.
  begin
    path = @path_slicer.call(@path)
    values_for_s3_object_key = {
      "path" => path,
      "time_slice" => chunk.key,
      "file_extension" => @ext,
      "index" => i
    }
    # Expand each %{name} placeholder; unknown names map to nil and
    # disappear from the key.
    s3path = @s3_object_key_format.gsub(%r(%{[^}]+})) { |expr|
      values_for_s3_object_key[expr[2...expr.size-1]]
    }
    i += 1
  end while @bucket.objects[s3path].exists?

  tmp = Tempfile.new("s3-")
  begin
    if @store_as == "gzip"
      w = Zlib::GzipWriter.new(tmp)
      chunk.write_to(w)
      w.close
    elsif @store_as == "lzo"
      # lzop is an external tool, so the chunk is written to a second
      # temp file and compressed into tmp's (closed) path; -f lets lzop
      # overwrite the empty file Tempfile already created.
      w = Tempfile.new("chunk-tmp")
      chunk.write_to(w)
      w.close
      tmp.close
      # We don't check the return code because we can't recover lzop failure.
      system "lzop -qf1 -o #{tmp.path} #{w.path}"
    else
      chunk.write_to(tmp)
      tmp.close
    end
    @bucket.objects[s3path].write(Pathname.new(tmp.path), {:content_type => @mime_type,
                                                           :reduced_redundancy => @reduced_redundancy})
  ensure
    # Best-effort cleanup: `w` is undefined on the plain-text path, so
    # the NameError (and any double-close) is swallowed by rescue nil.
    tmp.close(true) rescue nil
    w.close rescue nil
    w.unlink rescue nil
  end
end