Class: Tus::Storage::S3

Inherits: Object
Defined in:
lib/tus/storage/s3.rb

Constant Summary

MIN_PART_SIZE = 5 * 1024 * 1024

5 MB is the minimum part size for S3 multipart uploads.

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(bucket:, prefix: nil, upload_options: {}, concurrency: {}, thread_count: nil, **client_options) ⇒ S3

Initializes an aws-sdk-s3 client with the given credentials.



# File 'lib/tus/storage/s3.rb', line 22

def initialize(bucket:, prefix: nil, upload_options: {}, concurrency: {}, thread_count: nil, **client_options)
  fail ArgumentError, "the :bucket option was nil" unless bucket

  if thread_count
    warn "[Tus-Ruby-Server] :thread_count is deprecated and will be removed in the next major version, use :concurrency instead, e.g `concurrency: { concatenation: 20 }`"
    concurrency[:concatenation] = thread_count
  end

  resource = Aws::S3::Resource.new(**client_options)

  @client         = resource.client
  @bucket         = resource.bucket(bucket)
  @prefix         = prefix
  @upload_options = upload_options
  @concurrency    = concurrency
end
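
For example (a sketch; the bucket name, credentials, and region below are placeholders):

storage = Tus::Storage::S3.new(
  bucket:            "my-app-uploads",
  access_key_id:     "abc",
  secret_access_key: "xyz",
  region:            "eu-west-1",
)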

Instance Attribute Details

#bucket ⇒ Object (readonly)

Returns the value of attribute bucket.



# File 'lib/tus/storage/s3.rb', line 19

def bucket
  @bucket
end

#client ⇒ Object (readonly)

Returns the value of attribute client.



# File 'lib/tus/storage/s3.rb', line 19

def client
  @client
end

#concurrency ⇒ Object (readonly)

Returns the value of attribute concurrency.



# File 'lib/tus/storage/s3.rb', line 19

def concurrency
  @concurrency
end

#prefix ⇒ Object (readonly)

Returns the value of attribute prefix.



# File 'lib/tus/storage/s3.rb', line 19

def prefix
  @prefix
end

#upload_options ⇒ Object (readonly)

Returns the value of attribute upload_options.



# File 'lib/tus/storage/s3.rb', line 19

def upload_options
  @upload_options
end

Instance Method Details

#concatenate(uid, part_uids, info = {}) ⇒ Object

Concatenates multiple partial uploads into a single upload, and returns the size of the resulting upload. The partial uploads are deleted after concatenation.

Internally it creates a new multipart upload, copies objects of the given partial uploads into multipart parts, and finalizes the multipart upload.

The multipart upload is automatically aborted in case of an exception.



# File 'lib/tus/storage/s3.rb', line 73

def concatenate(uid, part_uids, info = {})
  multipart_upload = create_file(uid, info)

  objects = part_uids.map { |part_uid| object(part_uid) }
  parts   = copy_parts(objects, multipart_upload)

  info["multipart_parts"].concat parts

  finalize_file(uid, info)

  delete(part_uids.flat_map { |part_uid| [object(part_uid), object("#{part_uid}.info")] })

  # Tus server requires us to return the size of the concatenated file.
  object = client.head_object(bucket: bucket.name, key: object(uid).key)
  object.content_length
rescue => error
  abort_multipart_upload(multipart_upload) if multipart_upload
  raise error
end
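
A sketch of calling this from application code (the uids are hypothetical, and both partial uploads are assumed to be complete):

info   = {}
length = storage.concatenate("final-upload", ["partial-1", "partial-2"], info)
length #=> size in bytes of the concatenated object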

#create_file(uid, info = {}) ⇒ Object

Initiates a multipart upload for the given upload and stores its information inside the info hash.



# File 'lib/tus/storage/s3.rb', line 41

def create_file(uid, info = {})
  tus_info = Tus::Info.new(info)

  options = upload_options.dup
  options[:content_type] = tus_info.metadata["content_type"]

  if filename = tus_info.metadata["filename"]
    # Aws-sdk-s3 doesn't sign non-ASCII characters correctly, and browsers
    # will automatically URI-decode filenames.
    filename = CGI.escape(filename).gsub("+", "%20")

    options[:content_disposition] ||= "inline"
    options[:content_disposition]  += "; filename=\"#{filename}\""
  end

  multipart_upload = object(uid).initiate_multipart_upload(options)

  info["multipart_id"]    = multipart_upload.id
  info["multipart_parts"] = []

  multipart_upload
end
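
A sketch of the state this leaves in the info hash (the uid is hypothetical):

info = {}
storage.create_file("abc123", info)
info["multipart_id"]    #=> id of the initiated S3 multipart upload
info["multipart_parts"] #=> []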

#delete_file(uid, info = {}) ⇒ Object

Deletes resources for the specified upload. If the multipart upload is still in progress, aborts the multipart upload; otherwise deletes the object.



# File 'lib/tus/storage/s3.rb', line 197

def delete_file(uid, info = {})
  if info["multipart_id"]
    multipart_upload = object(uid).multipart_upload(info["multipart_id"])
    abort_multipart_upload(multipart_upload)

    delete [object("#{uid}.info")]
  else
    delete [object(uid), object("#{uid}.info")]
  end
end
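
A usage sketch (uid hypothetical); passing the stored info lets the method pick the right branch:

info = storage.read_info("abc123")
storage.delete_file("abc123", info)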

#expire_files(expiration_date) ⇒ Object

Deletes resources of uploads older than the specified date. For multipart uploads still in progress, it checks the upload date of the last multipart part.



# File 'lib/tus/storage/s3.rb', line 211

def expire_files(expiration_date)
  old_objects = bucket.objects.select do |object|
    object.last_modified <= expiration_date
  end

  delete(old_objects)

  bucket.multipart_uploads.each do |multipart_upload|
    # skip multipart uploads initiated after the expiration date, as they can't have expired yet
    next if multipart_upload.initiated > expiration_date

    most_recent_part = multipart_upload.parts.sort_by(&:last_modified).last
    if most_recent_part.nil? || most_recent_part.last_modified <= expiration_date
      abort_multipart_upload(multipart_upload)
    end
  end
end
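
A sketch of a periodic cleanup task that expires uploads older than 7 days:

storage.expire_files(Time.now - 7 * 24 * 60 * 60)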

#file_url(uid, info = {}, content_type: nil, content_disposition: nil, **options) ⇒ Object

Returns a signed expiring URL to the S3 object.



# File 'lib/tus/storage/s3.rb', line 187

def file_url(uid, info = {}, content_type: nil, content_disposition: nil, **options)
  options[:response_content_type]        ||= content_type
  options[:response_content_disposition] ||= content_disposition

  object(uid).presigned_url(:get, **options)
end
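
A usage sketch (uid hypothetical); extra options such as :expires_in are forwarded to aws-sdk-s3's #presigned_url:

url = storage.file_url("abc123", content_disposition: "attachment", expires_in: 3600)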

#finalize_file(uid, info = {}) ⇒ Object

Completes the multipart upload using the part information saved in the info hash.



# File 'lib/tus/storage/s3.rb', line 149

def finalize_file(uid, info = {})
  upload_id = info["multipart_id"]
  parts = info["multipart_parts"].map do |part|
    { part_number: part["part_number"], etag: part["etag"] }
  end

  multipart_upload = object(uid).multipart_upload(upload_id)
  multipart_upload.complete(multipart_upload: { parts: parts })

  info.delete("multipart_id")
  info.delete("multipart_parts")
end

#get_file(uid, info = {}, range: nil) ⇒ Object

Returns a Tus::Response object through which data of the specified upload can be retrieved in a streaming fashion. Accepts an optional range parameter for selecting a subset of bytes to retrieve.



# File 'lib/tus/storage/s3.rb', line 179

def get_file(uid, info = {}, range: nil)
  range  = "bytes=#{range.begin}-#{range.end}" if range
  chunks = object(uid).enum_for(:get, range: range)

  Tus::Response.new(chunks: chunks)
end
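
A usage sketch (uid hypothetical), assuming Tus::Response yields the retrieved chunks via #each:

response = storage.get_file("abc123", range: 0..99)
response.each { |chunk| print chunk }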

#patch_file(uid, input, info = {}) ⇒ Object

Appends data to the specified upload in a streaming fashion, and returns the number of bytes it managed to save.

The data read from the input is first buffered in memory, and once 5 MB (AWS S3's minimum allowed size for a multipart part) or more data has been retrieved, it is uploaded as the next multipart part. This way only about one part's worth of input data is held in memory at a time, achieving streaming.

If a network error is raised during the upload to S3, reading of further input data stops and the number of bytes that managed to get uploaded is returned.



# File 'lib/tus/storage/s3.rb', line 105

def patch_file(uid, input, info = {})
  tus_info = Tus::Info.new(info)

  upload_id      = info["multipart_id"]
  part_offset    = info["multipart_parts"].count
  bytes_uploaded = 0

  jobs = []
  chunk = input.read(MIN_PART_SIZE)

  while chunk
    next_chunk = input.read(MIN_PART_SIZE)

    # merge next chunk into previous if it's smaller than minimum chunk size
    if next_chunk && next_chunk.bytesize < MIN_PART_SIZE
      chunk << next_chunk
      next_chunk.clear
      next_chunk = nil
    end

    # abort if chunk is smaller than 5MB and is not the last chunk
    if chunk.bytesize < MIN_PART_SIZE
      break if (tus_info.length && tus_info.offset) &&
               chunk.bytesize + tus_info.offset < tus_info.length
    end

    begin
      part = upload_part(chunk, uid, upload_id, part_offset += 1)
      info["multipart_parts"] << part
      bytes_uploaded += chunk.bytesize
    rescue Seahorse::Client::NetworkingError => exception
      warn "ERROR: #{exception.inspect} occurred during upload"
      break # ignore networking errors and return what client has uploaded so far
    end

    chunk.clear
    chunk = next_chunk
  end

  bytes_uploaded
end
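
A sketch of the full multipart lifecycle these methods implement together:

uid  = "abc123"   # hypothetical upload id
info = {}

storage.create_file(uid, info)

storage.patch_file(uid, io, info)   # io: any object responding to #read(length); may be called repeatedly
storage.update_info(uid, info)      # persist the multipart state between requests

storage.finalize_file(uid, info)    # once all data has been uploaded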

#read_info(uid) ⇒ Object

Returns info of the specified upload. Raises Tus::NotFound if the upload wasn’t found.



# File 'lib/tus/storage/s3.rb', line 164

def read_info(uid)
  response = object("#{uid}.info").get
  JSON.parse(response.body.string)
rescue Aws::S3::Errors::NoSuchKey
  raise Tus::NotFound
end
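
A usage sketch (uid hypothetical):

begin
  info = storage.read_info("abc123")
rescue Tus::NotFound
  # the upload (or its .info object) doesn't exist
end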

#update_info(uid, info) ⇒ Object

Updates info of the specified upload.



# File 'lib/tus/storage/s3.rb', line 172

def update_info(uid, info)
  object("#{uid}.info").put(body: info.to_json)
end
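
A sketch of a read-modify-write round trip (the uid and metadata value are hypothetical):

info = storage.read_info("abc123")
info["Upload-Metadata"] = "filename ZXhhbXBsZS5wZGY="
storage.update_info("abc123", info)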