class Aws::S3::Object

Public Class Methods

new(*args) click to toggle source

@overload def initialize(bucket_name, key, options = {})

@param [String] bucket_name
@param [String] key
@option options [Client] :client

@overload def initialize(options = {})

@option options [required, String] :bucket_name
@option options [required, String] :key
@option options [Client] :client
# File lib/aws-sdk-s3/object.rb, line 24
def initialize(*args)
  options = Hash === args.last ? args.pop.dup : {}
  @bucket_name = extract_bucket_name(args, options)
  @key = extract_key(args, options)
  @data = options.delete(:data)
  @client = options.delete(:client) || Client.new(options)
  @waiter_block_warned = false
end
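
For illustration, a minimal sketch of both calling conventions described above; the bucket name, key, and region are placeholders.

require 'aws-sdk-s3'

# positional form: bucket name, then key
object = Aws::S3::Object.new('my-bucket', 'path/to/key')

# keyword form, supplying an explicit client (for example, to pin a region)
client = Aws::S3::Client.new(region: 'us-east-1')
object = Aws::S3::Object.new(
  bucket_name: 'my-bucket',
  key: 'path/to/key',
  client: client
)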

Public Instance Methods

accept_ranges() click to toggle source

Indicates that a range of bytes was specified. @return [String]

# File lib/aws-sdk-s3/object.rb, line 55
def accept_ranges
  data[:accept_ranges]
end
acl() click to toggle source

@return [ObjectAcl]

# File lib/aws-sdk-s3/object.rb, line 1543
def acl
  ObjectAcl.new(
    bucket_name: @bucket_name,
    object_key: @key,
    client: @client
  )
end
archive_status() click to toggle source

The archive state of the head object. @return [String]

# File lib/aws-sdk-s3/object.rb, line 96
def archive_status
  data[:archive_status]
end
bucket() click to toggle source

@return [Bucket]

# File lib/aws-sdk-s3/object.rb, line 1552
def bucket
  Bucket.new(
    name: @bucket_name,
    client: @client
  )
end
bucket_key_enabled() click to toggle source

Indicates whether the object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS). @return [Boolean]

# File lib/aws-sdk-s3/object.rb, line 230
def bucket_key_enabled
  data[:bucket_key_enabled]
end
bucket_name() click to toggle source

@return [String]

# File lib/aws-sdk-s3/object.rb, line 36
def bucket_name
  @bucket_name
end
cache_control() click to toggle source

Specifies caching behavior along the request/reply chain. @return [String]

# File lib/aws-sdk-s3/object.rb, line 137
def cache_control
  data[:cache_control]
end
client() click to toggle source

@return [Client]

# File lib/aws-sdk-s3/object.rb, line 344
def client
  @client
end
content_disposition() click to toggle source

Specifies presentational information for the object. @return [String]

# File lib/aws-sdk-s3/object.rb, line 143
def content_disposition
  data[:content_disposition]
end
content_encoding() click to toggle source

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. @return [String]

# File lib/aws-sdk-s3/object.rb, line 151
def content_encoding
  data[:content_encoding]
end
content_language() click to toggle source

The language the content is in. @return [String]

# File lib/aws-sdk-s3/object.rb, line 157
def content_language
  data[:content_language]
end
content_length() click to toggle source

Size of the body in bytes. @return [Integer]

# File lib/aws-sdk-s3/object.rb, line 108
def content_length
  data[:content_length]
end
Also aliased as: size
content_type() click to toggle source

A standard MIME type describing the format of the object data. @return [String]

# File lib/aws-sdk-s3/object.rb, line 163
def content_type
  data[:content_type]
end
copy_from(source, options = {}) click to toggle source

Copies another object to this object. Use `multipart_copy: true` for large objects. This is required for objects that exceed 5GB.

@param [S3::Object, S3::ObjectVersion, S3::ObjectSummary, String, Hash]

source Where to copy object data from. `source` must be one of the
following:

* {Aws::S3::Object}
* {Aws::S3::ObjectSummary}
* {Aws::S3::ObjectVersion}
* Hash - with `:bucket` and `:key` and optional `:version_id`
* String - formatted like `"source-bucket-name/uri-escaped-key"`
  or `"source-bucket-name/uri-escaped-key?versionId=version-id"`

@option options [Boolean] :multipart_copy (false) When `true`,

the object will be copied using the multipart APIs. This is
necessary for objects larger than 5GB and can provide
performance improvements on large objects. Amazon S3 does
not accept multipart copies for objects smaller than 5MB.

@option options [Integer] :content_length Only used when

`:multipart_copy` is `true`. Passing this option avoids a HEAD
request to query the source object size. Raises an `ArgumentError` if
this option is provided when `:multipart_copy` is `false` or not set.

@option options [S3::Client] :copy_source_client Only used when

`:multipart_copy` is `true` and the source object is in a
different region. You do not need to specify this option
if you have provided `:content_length`.

@option options [String] :copy_source_region Only used when

`:multipart_copy` is `true` and the source object is in a
different region. You do not need to specify this option
if you have provided a `:copy_source_client` or a `:content_length`.

@example Basic object copy

bucket = Aws::S3::Bucket.new('target-bucket')
object = bucket.object('target-key')

# source as String
object.copy_from('source-bucket/source-key')

# source as Hash
object.copy_from(bucket:'source-bucket', key:'source-key')

# source as Aws::S3::Object
object.copy_from(bucket.object('source-key'))

@example Managed copy of large objects

# uses multipart upload APIs to copy object
object.copy_from('src-bucket/src-key', multipart_copy: true)

@see copy_to

# File lib/aws-sdk-s3/customizations/object.rb, line 67
def copy_from(source, options = {})
  if Hash === source && source[:copy_source]
    # for backwards compatibility
    @client.copy_object(source.merge(bucket: bucket_name, key: key))
  else
    ObjectCopier.new(self, options).copy_from(source, options)
  end
end
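
As a further illustrative sketch (all bucket names, the key, and the region are placeholders): a managed multipart copy where the source bucket lives in a different region, using the `:copy_source_client` option described above.

target = Aws::S3::Bucket.new('target-bucket').object('target-key')
source_client = Aws::S3::Client.new(region: 'eu-west-1')

target.copy_from(
  { bucket: 'source-bucket', key: 'source-key' },
  multipart_copy: true,
  copy_source_client: source_client
)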
copy_to(target, options = {}) click to toggle source

Copies this object to another object. Use `multipart_copy: true` for large objects. This is required for objects that exceed 5GB.

@note If you need to copy to a bucket in a different region, use

{#copy_from}.

@param [S3::Object, String, Hash] target Where to copy the object

data to. `target` must be one of the following:

* {Aws::S3::Object}
* Hash - with `:bucket` and `:key`
* String - formatted like `"target-bucket-name/target-key"`

@example Basic object copy

bucket = Aws::S3::Bucket.new('source-bucket')
object = bucket.object('source-key')

# target as String
object.copy_to('target-bucket/target-key')

# target as Hash
object.copy_to(bucket: 'target-bucket', key: 'target-key')

# target as Aws::S3::Object
object.copy_to(bucket.object('target-key'))

@example Managed copy of large objects

# uses multipart upload APIs to copy object
object.copy_to('target-bucket/target-key', multipart_copy: true)
# File lib/aws-sdk-s3/customizations/object.rb, line 108
def copy_to(target, options = {})
  ObjectCopier.new(self, options).copy_to(target, options)
end
data() click to toggle source

@return [Types::HeadObjectOutput]

Returns the data for this {Object}. Calls
{Client#head_object} if {#data_loaded?} is `false`.
# File lib/aws-sdk-s3/object.rb, line 367
def data
  load unless @data
  @data
end
data_loaded?() click to toggle source

@return [Boolean]

Returns `true` if this resource is loaded.  Accessing attributes or
{#data} on an unloaded resource will trigger a call to {#load}.
# File lib/aws-sdk-s3/object.rb, line 375
def data_loaded?
  !!@data
end
delete(options = {}) click to toggle source

@example Request syntax with placeholder values

object.delete({
  mfa: "MFA",
  version_id: "ObjectVersionId",
  request_payer: "requester", # accepts requester
  bypass_governance_retention: false,
  expected_bucket_owner: "AccountId",
})

@param [Hash] options ({}) @option options [String] :mfa

The concatenation of the authentication device's serial number, a
space, and the value that is displayed on your authentication device.
Required to permanently delete a versioned object if versioning is
configured with MFA delete enabled.

@option options [String] :version_id

VersionId used to reference a specific version of the object.

@option options [String] :request_payer

Confirms that the requester knows that they will be charged for the
request. Bucket owners need not specify this parameter in their
requests. For information about downloading objects from requester
pays buckets, see [Downloading Objects in Requestor Pays Buckets][1]
in the *Amazon S3 User Guide*.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

@option options [Boolean] :bypass_governance_retention

Indicates whether S3 Object Lock should bypass Governance-mode
restrictions to process this operation. To use this header, you must
have the `s3:BypassGovernanceRetention` permission.

@option options [String] :expected_bucket_owner

The account ID of the expected bucket owner. If the bucket is owned by
a different account, the request will fail with an HTTP `403 (Access
Denied)` error.

@return [Types::DeleteObjectOutput]

# File lib/aws-sdk-s3/object.rb, line 815
def delete(options = {})
  options = options.merge(
    bucket: @bucket_name,
    key: @key
  )
  resp = @client.delete_object(options)
  resp.data
end
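
For illustration, deleting one specific version of a versioned object; the bucket, key, and version id are placeholders.

object = Aws::S3::Object.new('my-bucket', 'reports/2021.csv')

resp = object.delete(version_id: 'example-version-id')
resp.delete_marker # whether the deleted version was a delete marker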
delete_marker() click to toggle source

Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response. @return [Boolean]

# File lib/aws-sdk-s3/object.rb, line 49
def delete_marker
  data[:delete_marker]
end
download_file(destination, options = {}) click to toggle source

Downloads a file in S3 to a path on disk.

# small files (< 5MB) are downloaded in a single API call
obj.download_file('/path/to/file')

Files larger than 5MB are downloaded using the multipart method

# large files are split into parts
# and the parts are downloaded in parallel
obj.download_file('/path/to/very_large_file')

@param [String] destination Where to download the file to.

@option options [String] mode `auto`, `single_request`, `get_range`

`single_request` mode forces the download to be performed in a single GET request;
`get_range` mode allows the `chunk_size` parameter to be configured to
customize the size of each range in a multipart download.
By default, `auto` mode is enabled, which performs a multipart download.

@option options [Integer] chunk_size Required in `get_range` mode. The size, in bytes, of each range request.

@option options [Integer] thread_count (10) Customize threads used in

the multipart download.

@option options [String] version_id The object version id used to

retrieve the object. For more about object versioning, see:
https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html

@return [Boolean] Returns `true` when the file is downloaded without

any errors.
# File lib/aws-sdk-s3/customizations/object.rb, line 405
def download_file(destination, options = {})
  downloader = FileDownloader.new(client: client)
  downloader.download(
    destination,
    options.merge(bucket: bucket_name, key: key)
  )
  true
end
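
A sketch of the `get_range` mode described above, assuming a large object; the path, chunk size, and thread count are illustrative.

object = Aws::S3::Object.new('my-bucket', 'backups/archive.tar.gz')

# download in 10 MB ranges, using up to 5 threads
object.download_file(
  '/tmp/archive.tar.gz',
  mode: 'get_range',
  chunk_size: 10 * 1024 * 1024,
  thread_count: 5
)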
etag() click to toggle source

An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL. @return [String]

# File lib/aws-sdk-s3/object.rb, line 115
def etag
  data[:etag]
end
exists?(options = {}) click to toggle source

@param [Hash] options ({}) @return [Boolean]

Returns `true` if the Object exists.
# File lib/aws-sdk-s3/object.rb, line 382
def exists?(options = {})
  begin
    wait_until_exists(options.merge(max_attempts: 1))
    true
  rescue Aws::Waiters::Errors::UnexpectedError => e
    raise e.error
  rescue Aws::Waiters::Errors::WaiterFailed
    false
  end
end
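
A usage sketch (bucket and key are placeholders); note that the check issues a HEAD request.

object = Aws::S3::Object.new('my-bucket', 'config/settings.json')

if object.exists?
  puts object.get.body.read
else
  warn 'config/settings.json is not present in my-bucket'
end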
expiration() click to toggle source

If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key-value pairs providing object expiration information. The value of the rule-id is URL encoded. @return [String]

# File lib/aws-sdk-s3/object.rb, line 64
def expiration
  data[:expiration]
end
expires() click to toggle source

The date and time at which the object is no longer cacheable. @return [Time]

# File lib/aws-sdk-s3/object.rb, line 169
def expires
  data[:expires]
end
expires_string() click to toggle source

@return [String]

# File lib/aws-sdk-s3/object.rb, line 174
def expires_string
  data[:expires_string]
end
get(options = {}, &block) click to toggle source

@example Request syntax with placeholder values

object.get({
  if_match: "IfMatch",
  if_modified_since: Time.now,
  if_none_match: "IfNoneMatch",
  if_unmodified_since: Time.now,
  range: "Range",
  response_cache_control: "ResponseCacheControl",
  response_content_disposition: "ResponseContentDisposition",
  response_content_encoding: "ResponseContentEncoding",
  response_content_language: "ResponseContentLanguage",
  response_content_type: "ResponseContentType",
  response_expires: Time.now,
  version_id: "ObjectVersionId",
  sse_customer_algorithm: "SSECustomerAlgorithm",
  sse_customer_key: "SSECustomerKey",
  sse_customer_key_md5: "SSECustomerKeyMD5",
  request_payer: "requester", # accepts requester
  part_number: 1,
  expected_bucket_owner: "AccountId",
})

@param [Hash] options ({}) @option options [String] :if_match

Return the object only if its entity tag (ETag) is the same as the one
specified, otherwise return a 412 (precondition failed).

@option options [Time,DateTime,Date,Integer,String] :if_modified_since

Return the object only if it has been modified since the specified
time, otherwise return a 304 (not modified).

@option options [String] :if_none_match

Return the object only if its entity tag (ETag) is different from the
one specified, otherwise return a 304 (not modified).

@option options [Time,DateTime,Date,Integer,String] :if_unmodified_since

Return the object only if it has not been modified since the specified
time, otherwise return a 412 (precondition failed).

@option options [String] :range

Downloads the specified range bytes of an object. For more information
about the HTTP Range header, see
[https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35][1].

<note markdown="1"> Amazon S3 doesn't support retrieving multiple ranges of data per
`GET` request.

 </note>

[1]: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35

@option options [String] :response_cache_control

Sets the `Cache-Control` header of the response.

@option options [String] :response_content_disposition

Sets the `Content-Disposition` header of the response

@option options [String] :response_content_encoding

Sets the `Content-Encoding` header of the response.

@option options [String] :response_content_language

Sets the `Content-Language` header of the response.

@option options [String] :response_content_type

Sets the `Content-Type` header of the response.

@option options [Time,DateTime,Date,Integer,String] :response_expires

Sets the `Expires` header of the response.

@option options [String] :version_id

VersionId used to reference a specific version of the object.

@option options [String] :sse_customer_algorithm

Specifies the algorithm to use when decrypting the object (for
example, AES256).

@option options [String] :sse_customer_key

Specifies the customer-provided encryption key for Amazon S3 used to
encrypt the data. This value is used to decrypt the object when
recovering it and must match the one used when storing the data. The
key must be appropriate for use with the algorithm specified in the
`x-amz-server-side-encryption-customer-algorithm` header.

@option options [String] :sse_customer_key_md5

Specifies the 128-bit MD5 digest of the encryption key according to
RFC 1321. Amazon S3 uses this header for a message integrity check to
ensure that the encryption key was transmitted without error.

@option options [String] :request_payer

Confirms that the requester knows that they will be charged for the
request. Bucket owners need not specify this parameter in their
requests. For information about downloading objects from requester
pays buckets, see [Downloading Objects in Requestor Pays Buckets][1]
in the *Amazon S3 User Guide*.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

@option options [Integer] :part_number

Part number of the object being read. This is a positive integer
between 1 and 10,000. Effectively performs a 'ranged' GET request
for the part specified. Useful for downloading just a part of an
object.

@option options [String] :expected_bucket_owner

The account ID of the expected bucket owner. If the bucket is owned by
a different account, the request will fail with an HTTP `403 (Access
Denied)` error.

@return [Types::GetObjectOutput]

# File lib/aws-sdk-s3/object.rb, line 919
def get(options = {}, &block)
  options = options.merge(
    bucket: @bucket_name,
    key: @key
  )
  resp = @client.get_object(options, &block)
  resp.data
end
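
Two common patterns, sketched with placeholder names: streaming the body straight to disk via `:response_target`, and fetching only a byte range.

object = Aws::S3::Object.new('my-bucket', 'logs/app.log')

# stream the object directly to a local file instead of buffering it in memory
object.get(response_target: '/tmp/app.log')

# fetch only the first kilobyte
partial = object.get(range: 'bytes=0-1023')
partial.body.read #=> first 1024 bytes of the object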
head(options = {}) click to toggle source

@example Request syntax with placeholder values

object.head({
  if_match: "IfMatch",
  if_modified_since: Time.now,
  if_none_match: "IfNoneMatch",
  if_unmodified_since: Time.now,
  range: "Range",
  version_id: "ObjectVersionId",
  sse_customer_algorithm: "SSECustomerAlgorithm",
  sse_customer_key: "SSECustomerKey",
  sse_customer_key_md5: "SSECustomerKeyMD5",
  request_payer: "requester", # accepts requester
  part_number: 1,
  expected_bucket_owner: "AccountId",
})

@param [Hash] options ({}) @option options [String] :if_match

Return the object only if its entity tag (ETag) is the same as the one
specified, otherwise return a 412 (precondition failed).

@option options [Time,DateTime,Date,Integer,String] :if_modified_since

Return the object only if it has been modified since the specified
time, otherwise return a 304 (not modified).

@option options [String] :if_none_match

Return the object only if its entity tag (ETag) is different from the
one specified, otherwise return a 304 (not modified).

@option options [Time,DateTime,Date,Integer,String] :if_unmodified_since

Return the object only if it has not been modified since the specified
time, otherwise return a 412 (precondition failed).

@option options [String] :range

Downloads the specified range bytes of an object. For more information
about the HTTP Range header, see
[http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35][1].

<note markdown="1"> Amazon S3 doesn't support retrieving multiple ranges of data per
`GET` request.

 </note>

[1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35

@option options [String] :version_id

VersionId used to reference a specific version of the object.

@option options [String] :sse_customer_algorithm

Specifies the algorithm to use when encrypting the object (for
example, AES256).

@option options [String] :sse_customer_key

Specifies the customer-provided encryption key for Amazon S3 to use in
encrypting data. This value is used to store the object and then it is
discarded; Amazon S3 does not store the encryption key. The key must
be appropriate for use with the algorithm specified in the
`x-amz-server-side-encryption-customer-algorithm` header.

@option options [String] :sse_customer_key_md5

Specifies the 128-bit MD5 digest of the encryption key according to
RFC 1321. Amazon S3 uses this header for a message integrity check to
ensure that the encryption key was transmitted without error.

@option options [String] :request_payer

Confirms that the requester knows that they will be charged for the
request. Bucket owners need not specify this parameter in their
requests. For information about downloading objects from requester
pays buckets, see [Downloading Objects in Requestor Pays Buckets][1]
in the *Amazon S3 User Guide*.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

@option options [Integer] :part_number

Part number of the object being read. This is a positive integer
between 1 and 10,000. Effectively performs a 'ranged' HEAD request
for the part specified. Useful for querying the size of the part and
the number of parts in this object.

@option options [String] :expected_bucket_owner

The account ID of the expected bucket owner. If the bucket is owned by
a different account, the request will fail with an HTTP `403 (Access
Denied)` error.

@return [Types::HeadObjectOutput]

# File lib/aws-sdk-s3/object.rb, line 1531
def head(options = {})
  options = options.merge(
    bucket: @bucket_name,
    key: @key
  )
  resp = @client.head_object(options)
  resp.data
end
identifiers() click to toggle source

@deprecated @api private

# File lib/aws-sdk-s3/object.rb, line 1583
def identifiers
  {
    bucket_name: @bucket_name,
    key: @key
  }
end
initiate_multipart_upload(options = {}) click to toggle source

@example Request syntax with placeholder values

multipartupload = object.initiate_multipart_upload({
  acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
  cache_control: "CacheControl",
  content_disposition: "ContentDisposition",
  content_encoding: "ContentEncoding",
  content_language: "ContentLanguage",
  content_type: "ContentType",
  expires: Time.now,
  grant_full_control: "GrantFullControl",
  grant_read: "GrantRead",
  grant_read_acp: "GrantReadACP",
  grant_write_acp: "GrantWriteACP",
  metadata: {
    "MetadataKey" => "MetadataValue",
  },
  server_side_encryption: "AES256", # accepts AES256, aws:kms
  storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
  website_redirect_location: "WebsiteRedirectLocation",
  sse_customer_algorithm: "SSECustomerAlgorithm",
  sse_customer_key: "SSECustomerKey",
  sse_customer_key_md5: "SSECustomerKeyMD5",
  ssekms_key_id: "SSEKMSKeyId",
  ssekms_encryption_context: "SSEKMSEncryptionContext",
  bucket_key_enabled: false,
  request_payer: "requester", # accepts requester
  tagging: "TaggingHeader",
  object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE
  object_lock_retain_until_date: Time.now,
  object_lock_legal_hold_status: "ON", # accepts ON, OFF
  expected_bucket_owner: "AccountId",
})

@param [Hash] options ({}) @option options [String] :acl

The canned ACL to apply to the object.

This action is not supported by Amazon S3 on Outposts.

@option options [String] :cache_control

Specifies caching behavior along the request/reply chain.

@option options [String] :content_disposition

Specifies presentational information for the object.

@option options [String] :content_encoding

Specifies what content encodings have been applied to the object and
thus what decoding mechanisms must be applied to obtain the media-type
referenced by the Content-Type header field.

@option options [String] :content_language

The language the content is in.

@option options [String] :content_type

A standard MIME type describing the format of the object data.

@option options [Time,DateTime,Date,Integer,String] :expires

The date and time at which the object is no longer cacheable.

@option options [String] :grant_full_control

Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the
object.

This action is not supported by Amazon S3 on Outposts.

@option options [String] :grant_read

Allows grantee to read the object data and its metadata.

This action is not supported by Amazon S3 on Outposts.

@option options [String] :grant_read_acp

Allows grantee to read the object ACL.

This action is not supported by Amazon S3 on Outposts.

@option options [String] :grant_write_acp

Allows grantee to write the ACL for the applicable object.

This action is not supported by Amazon S3 on Outposts.

@option options [Hash<String,String>] :metadata

A map of metadata to store with the object in S3.

@option options [String] :server_side_encryption

The server-side encryption algorithm used when storing this object in
Amazon S3 (for example, AES256, aws:kms).

@option options [String] :storage_class

By default, Amazon S3 uses the STANDARD Storage Class to store newly
created objects. The STANDARD storage class provides high durability
and high availability. Depending on performance needs, you can specify
a different Storage Class. Amazon S3 on Outposts only uses the
OUTPOSTS Storage Class. For more information, see [Storage Classes][1]
in the *Amazon S3 User Guide*.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html

@option options [String] :website_redirect_location

If the bucket is configured as a website, redirects requests for this
object to another object in the same bucket or to an external URL.
Amazon S3 stores the value of this header in the object metadata.

@option options [String] :sse_customer_algorithm

Specifies the algorithm to use when encrypting the object (for
example, AES256).

@option options [String] :sse_customer_key

Specifies the customer-provided encryption key for Amazon S3 to use in
encrypting data. This value is used to store the object and then it is
discarded; Amazon S3 does not store the encryption key. The key must
be appropriate for use with the algorithm specified in the
`x-amz-server-side-encryption-customer-algorithm` header.

@option options [String] :sse_customer_key_md5

Specifies the 128-bit MD5 digest of the encryption key according to
RFC 1321. Amazon S3 uses this header for a message integrity check to
ensure that the encryption key was transmitted without error.

@option options [String] :ssekms_key_id

Specifies the ID of the symmetric customer managed Amazon Web Services
KMS CMK to use for object encryption. All GET and PUT requests for an
object protected by Amazon Web Services KMS will fail if not made via
SSL or using SigV4. For information about configuring using any of the
officially supported Amazon Web Services SDKs and Amazon Web Services
CLI, see [Specifying the Signature Version in Request
Authentication][1] in the *Amazon S3 User Guide*.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

@option options [String] :ssekms_encryption_context

Specifies the Amazon Web Services KMS Encryption Context to use for
object encryption. The value of this header is a base64-encoded UTF-8
string holding JSON with the encryption context key-value pairs.

@option options [Boolean] :bucket_key_enabled

Specifies whether Amazon S3 should use an S3 Bucket Key for object
encryption with server-side encryption using AWS KMS (SSE-KMS).
Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key
for object encryption with SSE-KMS.

Specifying this header with an object action doesn’t affect
bucket-level settings for S3 Bucket Key.

@option options [String] :request_payer

Confirms that the requester knows that they will be charged for the
request. Bucket owners need not specify this parameter in their
requests. For information about downloading objects from requester
pays buckets, see [Downloading Objects in Requestor Pays Buckets][1]
in the *Amazon S3 User Guide*.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

@option options [String] :tagging

The tag-set for the object. The tag-set must be encoded as URL Query
parameters.

@option options [String] :object_lock_mode

Specifies the Object Lock mode that you want to apply to the uploaded
object.

@option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date

Specifies the date and time when you want the Object Lock to expire.

@option options [String] :object_lock_legal_hold_status

Specifies whether you want to apply a Legal Hold to the uploaded
object.

@option options [String] :expected_bucket_owner

The account ID of the expected bucket owner. If the bucket is owned by
a different account, the request will fail with an HTTP `403 (Access
Denied)` error.

@return [MultipartUpload]

# File lib/aws-sdk-s3/object.rb, line 1080
def initiate_multipart_upload(options = {})
  options = options.merge(
    bucket: @bucket_name,
    key: @key
  )
  resp = @client.create_multipart_upload(options)
  MultipartUpload.new(
    bucket_name: @bucket_name,
    object_key: @key,
    id: resp.data.upload_id,
    client: @client
  )
end
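
A hedged end-to-end sketch of the multipart flow this method starts, driving the returned {MultipartUpload} resource; the file path, part size, and key are placeholders, and real code should also handle failures by calling `abort`.

object = Aws::S3::Object.new('my-bucket', 'videos/raw.mp4')
mpu = object.initiate_multipart_upload(content_type: 'video/mp4')

completed_parts = []
File.open('/tmp/raw.mp4', 'rb') do |io|
  part_number = 1
  # every part except the last must be at least 5 MB
  while (chunk = io.read(8 * 1024 * 1024))
    resp = mpu.part(part_number).upload(body: chunk)
    completed_parts << { part_number: part_number, etag: resp.etag }
    part_number += 1
  end
end

mpu.complete(multipart_upload: { parts: completed_parts })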
key() click to toggle source

@return [String]

# File lib/aws-sdk-s3/object.rb, line 41
def key
  @key
end
last_modified() click to toggle source

Creation date of the object. @return [Time]

# File lib/aws-sdk-s3/object.rb, line 102
def last_modified
  data[:last_modified]
end
load() click to toggle source

Loads, or reloads {#data} for the current {Object}. Returns `self` making it possible to chain methods.

object.reload.data

@return [self]

# File lib/aws-sdk-s3/object.rb, line 354
def load
  resp = @client.head_object(
    bucket: @bucket_name,
    key: @key
  )
  @data = resp.data
  self
end
Also aliased as: reload
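
For illustration, how loading interacts with the cached {#data}:

object = Aws::S3::Object.new('my-bucket', 'images/logo.png')

object.data_loaded?   #=> false, nothing fetched yet
object.content_length # first attribute access triggers a HeadObject call via #load
object.data_loaded?   #=> true, attributes now come from the cached data
object.reload         # forces a fresh HeadObject call and returns self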
metadata() click to toggle source

A map of metadata to store with the object in S3. @return [Hash<String,String>]

# File lib/aws-sdk-s3/object.rb, line 198
def metadata
  data[:metadata]
end
missing_meta() click to toggle source

This is set to the number of metadata entries not returned in `x-amz-meta` headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers. @return [Integer]

# File lib/aws-sdk-s3/object.rb, line 125
def missing_meta
  data[:missing_meta]
end
move_to(target, options = {}) click to toggle source

Copies and deletes the current object. The object will only be deleted if the copy operation succeeds.

@param (see Object#copy_to) @option (see Object#copy_to) @return [void] @see Object#copy_to @see Object#delete

# File lib/aws-sdk-s3/customizations/object.rb, line 120
def move_to(target, options = {})
  copy_to(target, options)
  delete
end
multipart_upload(id) click to toggle source

@param [String] id @return [MultipartUpload]

# File lib/aws-sdk-s3/object.rb, line 1561
def multipart_upload(id)
  MultipartUpload.new(
    bucket_name: @bucket_name,
    object_key: @key,
    id: id,
    client: @client
  )
end
object_lock_mode() click to toggle source

The Object Lock mode, if any, that's in effect for this object. This header is only returned if the requester has the `s3:GetObjectRetention` permission. For more information about S3 Object Lock, see [Object Lock][1].

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html @return [String]

# File lib/aws-sdk-s3/object.rb, line 315
def object_lock_mode
  data[:object_lock_mode]
end
object_lock_retain_until_date() click to toggle source

The date and time when the Object Lock retention period expires. This header is only returned if the requester has the `s3:GetObjectRetention` permission. @return [Time]

# File lib/aws-sdk-s3/object.rb, line 323
def object_lock_retain_until_date
  data[:object_lock_retain_until_date]
end
parts_count() click to toggle source

The count of parts this object has. @return [Integer]

# File lib/aws-sdk-s3/object.rb, line 302
def parts_count
  data[:parts_count]
end
presigned_post(options = {}) click to toggle source

Creates a {PresignedPost} that makes it easy to upload a file from a web browser directly to Amazon S3 using an HTML POST form with a file field.

See the {PresignedPost} documentation for more information.

@option (see PresignedPost#initialize) @return [PresignedPost] @see PresignedPost

# File lib/aws-sdk-s3/customizations/object.rb, line 134
def presigned_post(options = {})
  PresignedPost.new(
    client.config.credentials,
    client.config.region,
    bucket_name,
    { key: key, url: bucket.url }.merge(options)
  )
end
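
A usage sketch, assuming the returned URL and fields will be embedded in an HTML form; the bucket, key, and size limit are placeholders.

object = Aws::S3::Object.new('my-bucket', 'uploads/photo.jpg')
post = object.presigned_post(content_length_range: 1..10_485_760)

post.url    # the form's action URL
post.fields # hidden field name/value pairs to include in the form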
presigned_url(method, params = {}) click to toggle source

Generates a pre-signed URL for this object.

@example Pre-signed GET URL, valid for one hour

obj.presigned_url(:get, expires_in: 3600)
#=> "https://bucket-name.s3.amazonaws.com/object-key?..."

@example Pre-signed PUT with a canned ACL

# the object uploaded using this URL will be publicly accessible
obj.presigned_url(:put, acl: 'public-read')
#=> "https://bucket-name.s3.amazonaws.com/object-key?..."

@example Pre-signed UploadPart PUT

# uploads a single part of an in-progress multipart upload
obj.presigned_url(:upload_part, part_number: 1, upload_id: 'uploadIdToken')
#=> "https://bucket-name.s3.amazonaws.com/object-key?..."

@param [Symbol] method

The S3 operation to generate a presigned URL for. Valid values
are `:get`, `:put`, `:head`, `:delete`, `:create_multipart_upload`, 
`:list_multipart_uploads`, `:complete_multipart_upload`,
`:abort_multipart_upload`, `:list_parts`, and `:upload_part`.

@param [Hash] params

Additional request parameters to use when generating the pre-signed
URL. See the related documentation in {Client} for accepted
params.

| Method                       | Client Method                      |
|------------------------------|------------------------------------|
| `:get`                       | {Client#get_object}                |
| `:put`                       | {Client#put_object}                |
| `:head`                      | {Client#head_object}               |
| `:delete`                    | {Client#delete_object}             |
| `:create_multipart_upload`   | {Client#create_multipart_upload}   |
| `:list_multipart_uploads`    | {Client#list_multipart_uploads}    |
| `:complete_multipart_upload` | {Client#complete_multipart_upload} |
| `:abort_multipart_upload`    | {Client#abort_multipart_upload}    |
| `:list_parts`                | {Client#list_parts}                |
| `:upload_part`               | {Client#upload_part}               |

@option params [Boolean] :virtual_host (false) When `true` the

presigned URL will use the bucket name as a virtual host.

  bucket = Aws::S3::Bucket.new('my.bucket.com')
  bucket.object('key').presigned_url(:get, virtual_host: true)
  #=> "http://my.bucket.com/key?..."

@option params [Integer] :expires_in (900) Number of seconds before

the pre-signed URL expires. This may not exceed one week (604800
seconds). Note that the pre-signed URL is also only valid as long as
credentials used to sign it are. For example, when using IAM roles,
temporary tokens generated for signing also have a default expiration
which will affect the effective expiration of the pre-signed URL.

@raise [ArgumentError] Raised if `:expires_in` exceeds one week

(604800 seconds).

@return [String]

# File lib/aws-sdk-s3/customizations/object.rb, line 205
def presigned_url(method, params = {})
  presigner = Presigner.new(client: client)

  if %w(delete head get put).include?(method.to_s)
    method = "#{method}_object".to_sym
  end

  presigner.presigned_url(
    method.downcase,
    params.merge(bucket: bucket_name, key: key)
  )
end
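
For illustration, consuming a pre-signed GET URL with Ruby's standard library; the bucket, key, and expiry are placeholders.

require 'net/http'
require 'uri'

object = Aws::S3::Object.new('my-bucket', 'reports/summary.pdf')
url = object.presigned_url(:get, expires_in: 300)

# any HTTP client can use the URL until it expires; no AWS credentials are needed
body = Net::HTTP.get(URI.parse(url))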
public_url(options = {}) click to toggle source

Returns the public (un-signed) URL for this object.

s3.bucket('bucket-name').object('obj-key').public_url
#=> "https://bucket-name.s3.amazonaws.com/obj-key"

To use a virtual-hosted bucket URL, pass `virtual_host: true`. HTTPS is used unless `secure: false` is set, which is required when the bucket name contains dots (.).

s3.bucket('my-bucket.com').object('key')
  .public_url(virtual_host: true)
#=> "https://my-bucket.com/key"

@option options [Boolean] :virtual_host (false) When `true`, the bucket

name will be used as the host name. This is useful when you have
a CNAME configured for the bucket.

@option options [Boolean] :secure (true) When `false`, http

will be used with virtual_host.  This is required when
the bucket name has a dot (.) in it.

@return [String]

# File lib/aws-sdk-s3/customizations/object.rb, line 240
def public_url(options = {})
  url = URI.parse(bucket.url(options))
  url.path += '/' unless url.path[-1] == '/'
  url.path += key.gsub(/[^\/]+/) { |s| Seahorse::Util.uri_escape(s) }
  url.to_s
end
put(options = {}) click to toggle source

@example Request syntax with placeholder values

object.put({
  acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
  body: source_file,
  cache_control: "CacheControl",
  content_disposition: "ContentDisposition",
  content_encoding: "ContentEncoding",
  content_language: "ContentLanguage",
  content_length: 1,
  content_md5: "ContentMD5",
  content_type: "ContentType",
  expires: Time.now,
  grant_full_control: "GrantFullControl",
  grant_read: "GrantRead",
  grant_read_acp: "GrantReadACP",
  grant_write_acp: "GrantWriteACP",
  metadata: {
    "MetadataKey" => "MetadataValue",
  },
  server_side_encryption: "AES256", # accepts AES256, aws:kms
  storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
  website_redirect_location: "WebsiteRedirectLocation",
  sse_customer_algorithm: "SSECustomerAlgorithm",
  sse_customer_key: "SSECustomerKey",
  sse_customer_key_md5: "SSECustomerKeyMD5",
  ssekms_key_id: "SSEKMSKeyId",
  ssekms_encryption_context: "SSEKMSEncryptionContext",
  bucket_key_enabled: false,
  request_payer: "requester", # accepts requester
  tagging: "TaggingHeader",
  object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE
  object_lock_retain_until_date: Time.now,
  object_lock_legal_hold_status: "ON", # accepts ON, OFF
  expected_bucket_owner: "AccountId",
})

@param [Hash] options ({}) @option options [String] :acl

The canned ACL to apply to the object. For more information, see
[Canned ACL][1].

This action is not supported by Amazon S3 on Outposts.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL

@option options [String, StringIO, File] :body

Object data.

@option options [String] :cache_control

Can be used to specify caching behavior along the request/reply chain.
For more information, see
[http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1].

[1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9

@option options [String] :content_disposition

Specifies presentational information for the object. For more
information, see
[http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1][1].

[1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1

@option options [String] :content_encoding

Specifies what content encodings have been applied to the object and
thus what decoding mechanisms must be applied to obtain the media-type
referenced by the Content-Type header field. For more information, see
[http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11][1].

[1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11

@option options [String] :content_language

The language the content is in.

@option options [Integer] :content_length

Size of the body in bytes. This parameter is useful when the size of
the body cannot be determined automatically. For more information, see
[http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13][1].

[1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13

@option options [String] :content_md5

The base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This header can be used as a message
integrity check to verify that the data is the same data that was
originally sent. Although it is optional, we recommend using the
Content-MD5 mechanism as an end-to-end integrity check. For more
information about REST request authentication, see [REST
Authentication][1].

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html

@option options [String] :content_type

A standard MIME type describing the format of the contents. For more
information, see
[http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17][1].

[1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17

@option options [Time,DateTime,Date,Integer,String] :expires

The date and time at which the object is no longer cacheable. For more
information, see
[http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21][1].

[1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21

@option options [String] :grant_full_control

Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the
object.

This action is not supported by Amazon S3 on Outposts.

@option options [String] :grant_read

Allows grantee to read the object data and its metadata.

This action is not supported by Amazon S3 on Outposts.

@option options [String] :grant_read_acp

Allows grantee to read the object ACL.

This action is not supported by Amazon S3 on Outposts.

@option options [String] :grant_write_acp

Allows grantee to write the ACL for the applicable object.

This action is not supported by Amazon S3 on Outposts.

@option options [Hash<String,String>] :metadata

A map of metadata to store with the object in S3.

@option options [String] :server_side_encryption

The server-side encryption algorithm used when storing this object in
Amazon S3 (for example, AES256, aws:kms).

@option options [String] :storage_class

By default, Amazon S3 uses the STANDARD Storage Class to store newly
created objects. The STANDARD storage class provides high durability
and high availability. Depending on performance needs, you can specify
a different Storage Class. Amazon S3 on Outposts only uses the
OUTPOSTS Storage Class. For more information, see [Storage Classes][1]
in the *Amazon S3 User Guide*.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html

@option options [String] :website_redirect_location

If the bucket is configured as a website, redirects requests for this
object to another object in the same bucket or to an external URL.
Amazon S3 stores the value of this header in the object metadata. For
information about object metadata, see [Object Key and Metadata][1].

In the following example, the request header sets the redirect to an
object (anotherPage.html) in the same bucket:

`x-amz-website-redirect-location: /anotherPage.html`

In the following example, the request header sets the object redirect
to another website:

`x-amz-website-redirect-location: http://www.example.com/`

For more information about website hosting in Amazon S3, see [Hosting
Websites on Amazon S3][2] and [How to Configure Website Page
Redirects][3].

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
[2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
[3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html

@option options [String] :sse_customer_algorithm

Specifies the algorithm to use when encrypting the object (for
example, AES256).

@option options [String] :sse_customer_key

Specifies the customer-provided encryption key for Amazon S3 to use in
encrypting data. This value is used to store the object and then it is
discarded; Amazon S3 does not store the encryption key. The key must
be appropriate for use with the algorithm specified in the
`x-amz-server-side-encryption-customer-algorithm` header.

@option options [String] :sse_customer_key_md5

Specifies the 128-bit MD5 digest of the encryption key according to
RFC 1321. Amazon S3 uses this header for a message integrity check to
ensure that the encryption key was transmitted without error.

@option options [String] :ssekms_key_id

If `x-amz-server-side-encryption` is present and has the value of
`aws:kms`, this header specifies the ID of the Amazon Web Services Key
Management Service (Amazon Web Services KMS) symmetrical customer
managed customer master key (CMK) that was used for the object. If you
specify `x-amz-server-side-encryption:aws:kms`, but do not provide
`x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the
Amazon Web Services managed CMK in Amazon Web Services to protect the
data. If the KMS key does not exist in the same account issuing the
command, you must use the full ARN and not just the ID.

@option options [String] :ssekms_encryption_context

Specifies the Amazon Web Services KMS Encryption Context to use for
object encryption. The value of this header is a base64-encoded UTF-8
string holding JSON with the encryption context key-value pairs.

@option options [Boolean] :bucket_key_enabled

Specifies whether Amazon S3 should use an S3 Bucket Key for object
encryption with server-side encryption using AWS KMS (SSE-KMS).
Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key
for object encryption with SSE-KMS.

Specifying this header with a PUT action doesn’t affect bucket-level
settings for S3 Bucket Key.

@option options [String] :request_payer

Confirms that the requester knows that they will be charged for the
request. Bucket owners need not specify this parameter in their
requests. For information about downloading objects from requester
pays buckets, see [Downloading Objects in Requestor Pays Buckets][1]
in the *Amazon S3 User Guide*.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

@option options [String] :tagging

The tag-set for the object. The tag-set must be encoded as URL Query
parameters. (For example, "Key1=Value1")

@option options [String] :object_lock_mode

The Object Lock mode that you want to apply to this object.

@option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date

The date and time when you want this object's Object Lock to expire.
Must be formatted as a timestamp parameter.

@option options [String] :object_lock_legal_hold_status

Specifies whether a legal hold will be applied to this object. For
more information about S3 Object Lock, see [Object Lock][1].

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html

@option options [String] :expected_bucket_owner

The account ID of the expected bucket owner. If the bucket is owned by
a different account, the request will fail with an HTTP `403 (Access
Denied)` error.

@return [Types::PutObjectOutput]

# File lib/aws-sdk-s3/object.rb, line 1328
def put(options = {})
  options = options.merge(
    bucket: @bucket_name,
    key: @key
  )
  resp = @client.put_object(options)
  resp.data
end
reload()
Alias for: load
replication_status() click to toggle source

Amazon S3 can return this header if your request involves a bucket that is either a source or a destination in a replication rule.

In replication, you have a source bucket on which you configure replication and destination bucket or buckets where Amazon S3 stores object replicas. When you request an object (`GetObject`) or object metadata (`HeadObject`) from these buckets, Amazon S3 will return the `x-amz-replication-status` header in the response as follows:

  • If requesting an object from the source bucket — Amazon S3 will return the `x-amz-replication-status` header if the object in your request is eligible for replication.

    For example, suppose that in your replication configuration, you specify object prefix `TaxDocs` requesting Amazon S3 to replicate objects with key prefix `TaxDocs`. Any objects you upload with this key name prefix, for example `TaxDocs/document1.pdf`, are eligible for replication. For any object request with this key name prefix, Amazon S3 will return the `x-amz-replication-status` header with value PENDING, COMPLETED or FAILED indicating object replication status.

  • If requesting an object from a destination bucket — Amazon S3 will return the `x-amz-replication-status` header with value REPLICA if the object in your request is a replica that Amazon S3 created and there is no replica modification replication in progress.

  • When replicating objects to multiple destination buckets, the `x-amz-replication-status` header acts differently. The header of the source object will only return a value of COMPLETED when replication is successful to all destinations. The header will remain at value PENDING until replication has completed for all destinations. If one or more destinations fail replication, the header will return FAILED.

For more information, see [Replication][1].

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html @return [String]

# File lib/aws-sdk-s3/object.rb, line 296
def replication_status
  data[:replication_status]
end
request_charged() click to toggle source

If present, indicates that the requester was successfully charged for the request. @return [String]

# File lib/aws-sdk-s3/object.rb, line 251
def request_charged
  data[:request_charged]
end
restore() click to toggle source

If the object is an archived object (an object whose storage class is GLACIER), the response includes this header if either the archive restoration is in progress (see [RestoreObject][1]) or an archive copy is already restored.

If an archive copy is already restored, the header value indicates when Amazon S3 is scheduled to delete the object copy. For example:

`x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`

If the object restoration is in progress, the header returns the value `ongoing-request="true"`.

For more information about archiving objects, see [Transitioning Objects: General Considerations][2].

[1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations @return [String]

# File lib/aws-sdk-s3/object.rb, line 90
def restore
  data[:restore]
end
restore_object(options = {}) click to toggle source

@example Request syntax with placeholder values

object.restore_object({
  version_id: "ObjectVersionId",
  restore_request: {
    days: 1,
    glacier_job_parameters: {
      tier: "Standard", # required, accepts Standard, Bulk, Expedited
    },
    type: "SELECT", # accepts SELECT
    tier: "Standard", # accepts Standard, Bulk, Expedited
    description: "Description",
    select_parameters: {
      input_serialization: { # required
        csv: {
          file_header_info: "USE", # accepts USE, IGNORE, NONE
          comments: "Comments",
          quote_escape_character: "QuoteEscapeCharacter",
          record_delimiter: "RecordDelimiter",
          field_delimiter: "FieldDelimiter",
          quote_character: "QuoteCharacter",
          allow_quoted_record_delimiter: false,
        },
        compression_type: "NONE", # accepts NONE, GZIP, BZIP2
        json: {
          type: "DOCUMENT", # accepts DOCUMENT, LINES
        },
        parquet: {
        },
      },
      expression_type: "SQL", # required, accepts SQL
      expression: "Expression", # required
      output_serialization: { # required
        csv: {
          quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED
          quote_escape_character: "QuoteEscapeCharacter",
          record_delimiter: "RecordDelimiter",
          field_delimiter: "FieldDelimiter",
          quote_character: "QuoteCharacter",
        },
        json: {
          record_delimiter: "RecordDelimiter",
        },
      },
    },
    output_location: {
      s3: {
        bucket_name: "BucketName", # required
        prefix: "LocationPrefix", # required
        encryption: {
          encryption_type: "AES256", # required, accepts AES256, aws:kms
          kms_key_id: "SSEKMSKeyId",
          kms_context: "KMSContext",
        },
        canned_acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
        access_control_list: [
          {
            grantee: {
              display_name: "DisplayName",
              email_address: "EmailAddress",
              id: "ID",
              type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group
              uri: "URI",
            },
            permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP
          },
        ],
        tagging: {
          tag_set: [ # required
            {
              key: "ObjectKey", # required
              value: "Value", # required
            },
          ],
        },
        user_metadata: [
          {
            name: "MetadataKey",
            value: "MetadataValue",
          },
        ],
        storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
      },
    },
  },
  request_payer: "requester", # accepts requester
  expected_bucket_owner: "AccountId",
})

@param [Hash] options ({}) @option options [String] :version_id

VersionId used to reference a specific version of the object.

@option options [Types::RestoreRequest] :restore_request

Container for restore job parameters.

@option options [String] :request_payer

Confirms that the requester knows that they will be charged for the
request. Bucket owners need not specify this parameter in their
requests. For information about downloading objects from requester
pays buckets, see [Downloading Objects in Requestor Pays Buckets][1]
in the *Amazon S3 User Guide*.

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

@option options [String] :expected_bucket_owner

The account ID of the expected bucket owner. If the bucket is owned by
a different account, the request will fail with an HTTP `403 (Access
Denied)` error.

@return [Types::RestoreObjectOutput]

# File lib/aws-sdk-s3/object.rb, line 1445
def restore_object(options = {})
  options = options.merge(
    bucket: @bucket_name,
    key: @key
  )
  resp = @client.restore_object(options)
  resp.data
end
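
A hedged sketch of restoring an archived (GLACIER) copy and then polling the {#restore} attribute; the tier, day count, and names are illustrative.

object = Aws::S3::Object.new('my-bucket', 'archive/2018.tar')

object.restore_object(
  restore_request: {
    days: 2,
    glacier_job_parameters: { tier: 'Bulk' }
  }
)

# later: reload and inspect the x-amz-restore header
object.reload.restore #=> 'ongoing-request="true"' while the restore is still in progress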
server_side_encryption() click to toggle source

If the object is stored using server-side encryption either with an Amazon Web Services KMS customer master key (CMK) or an Amazon S3-managed encryption key, the response includes this header with the value of the server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms). @return [String]

# File lib/aws-sdk-s3/object.rb, line 192
def server_side_encryption
  data[:server_side_encryption]
end
size()
Alias for: content_length
sse_customer_algorithm() click to toggle source

If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used. @return [String]

# File lib/aws-sdk-s3/object.rb, line 206
def sse_customer_algorithm
  data[:sse_customer_algorithm]
end
sse_customer_key_md5() click to toggle source

If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key. @return [String]

# File lib/aws-sdk-s3/object.rb, line 215
def sse_customer_key_md5
  data[:sse_customer_key_md5]
end
ssekms_key_id() click to toggle source

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) that was used for the object. @return [String]

# File lib/aws-sdk-s3/object.rb, line 223
def ssekms_key_id
  data[:ssekms_key_id]
end
storage_class() click to toggle source

Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.

For more information, see [Storage Classes][1].

[1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html @return [String]

# File lib/aws-sdk-s3/object.rb, line 244
def storage_class
  data[:storage_class]
end
upload_file(source, options = {}) { |response| ... } click to toggle source

Uploads a file from disk to the current object in S3.

# small files are uploaded in a single API call
obj.upload_file('/path/to/file')

Files larger than or equal to `:multipart_threshold` are uploaded using the Amazon S3 multipart upload APIs.

# large files are automatically split into parts
# and the parts are uploaded in parallel
obj.upload_file('/path/to/very_large_file')

The response of the S3 upload API is yielded if a block is given.

# API response will have etag value of the file
obj.upload_file('/path/to/file') do |response|
  etag = response.etag
end

You can provide a callback to monitor progress of the upload:

# bytes and totals are each an array with 1 entry per part
progress = Proc.new do |bytes, totals|
  puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}" }.join(' ') + " Total: #{100.0 * bytes.sum / totals.sum}%"
end
obj.upload_file('/path/to/file', progress_callback: progress)

@param [String, Pathname, File, Tempfile] source A file on the local

file system that will be uploaded as this object. This can either be
a String or Pathname to the file, an open File object, or an open
Tempfile object. If you pass an open File or Tempfile object, then
you are responsible for closing it after the upload completes. When
using an open Tempfile, rewind it before uploading or else the object
will be empty.

@option options [Integer] :multipart_threshold (104857600) Files larger

than or equal to `:multipart_threshold` are uploaded using the S3
multipart APIs.
Default threshold is 100MB.

@option options [Integer] :thread_count (10) The number of parallel

multipart uploads. This option is not used if the file is smaller than
`:multipart_threshold`.

@option options [Proc] :progress_callback

A Proc that will be called when each chunk of the upload is sent.
It will be invoked with [bytes_read], [total_sizes]

@raise [MultipartUploadError] If an object is being uploaded in

parts, and the upload can not be completed, then the upload is
aborted and this error is raised.  The raised error has a `#errors`
method that returns the failures that caused the upload to be
aborted.

@return [Boolean] Returns `true` when the object is uploaded

without any errors.
# File lib/aws-sdk-s3/customizations/object.rb, line 361
def upload_file(source, options = {})
  uploading_options = options.dup
  uploader = FileUploader.new(
    multipart_threshold: uploading_options.delete(:multipart_threshold),
    client: client
  )
  response = uploader.upload(
    source,
    uploading_options.merge(bucket: bucket_name, key: key)
  )
  yield response if block_given?
  true
end
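
A combined sketch showing the options above together with error handling; the path, threshold, and thread count are illustrative:

begin
  obj.upload_file(
    '/path/to/backup.tar',
    multipart_threshold: 50 * 1024 * 1024, # switch to multipart above 50MB
    thread_count: 5
  ) do |response|
    puts "uploaded, ETag: #{response.etag}"
  end
rescue Aws::S3::MultipartUploadError => e
  # the multipart upload was aborted; inspect the per-part failures
  e.errors.each { |error| warn error.message }
end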
upload_stream(options = {}, &block) click to toggle source

Uploads a stream in a streaming fashion to the current object in S3.

Passed chunks are automatically split into multipart upload parts, and the parts are uploaded in parallel. This allows for streaming uploads that never touch the disk.

Note that this is known to have issues in JRuby until jruby-9.1.15.0, so avoid using this with older versions of JRuby.

@example Streaming chunks of data

obj.upload_stream do |write_stream|
  10.times { write_stream << 'foo' }
end

@example Streaming chunks of data

obj.upload_stream do |write_stream|
  IO.copy_stream(IO.popen('ls'), write_stream)
end

@example Streaming chunks of data

obj.upload_stream do |write_stream|
  IO.copy_stream(STDIN, write_stream)
end

@option options [Integer] :thread_count (10) The number of parallel

multipart uploads

@option options [Boolean] :tempfile (false) Normally read data is stored

in memory when building the parts in order to complete the underlying
multipart upload. By passing `:tempfile => true`, read data will be
temporarily stored on disk, greatly reducing the memory footprint.

@option options [Integer] :part_size (5242880)

The size of each part except the last.
Default `:part_size` is `5 * 1024 * 1024` (5MB).

@raise [MultipartUploadError] If an object is being uploaded in

parts, and the upload can not be completed, then the upload is
aborted and this error is raised.  The raised error has a `#errors`
method that returns the failures that caused the upload to be
aborted.

@return [Boolean] Returns `true` when the object is uploaded

without any errors.
# File lib/aws-sdk-s3/customizations/object.rb, line 290
def upload_stream(options = {}, &block)
  uploading_options = options.dup
  uploader = MultipartStreamUploader.new(
    client: client,
    thread_count: uploading_options.delete(:thread_count),
    tempfile: uploading_options.delete(:tempfile),
    part_size: uploading_options.delete(:part_size)
  )
  uploader.upload(
    uploading_options.merge(bucket: bucket_name, key: key),
    &block
  )
  true
end
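
A combined sketch using the options above; the shell command and sizes are illustrative assumptions:

obj.upload_stream(
  tempfile: true,              # buffer parts on disk instead of in memory
  part_size: 10 * 1024 * 1024, # 10MB parts instead of the 5MB default
  thread_count: 4
) do |write_stream|
  IO.copy_stream(IO.popen('pg_dump mydb'), write_stream)
end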
version(id) click to toggle source

@param [String] id
@return [ObjectVersion]

# File lib/aws-sdk-s3/object.rb, line 1572
def version(id)
  ObjectVersion.new(
    bucket_name: @bucket_name,
    object_key: @key,
    id: id,
    client: @client
  )
end
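
A short sketch working with the returned ObjectVersion; the version id and download path are placeholders:

old_version = obj.version('VERSION_ID')
old_version.get(response_target: '/tmp/previous.txt') # download that revision
old_version.delete                                    # or remove it permanently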
version_id() click to toggle source

Version of the object. @return [String]

# File lib/aws-sdk-s3/object.rb, line 131
def version_id
  data[:version_id]
end
wait_until(options = {}, &block) click to toggle source

@deprecated Use [Aws::S3::Client] wait_until instead

Waiter polls an API operation until a resource enters a desired state.

@note The waiting operation is performed on a copy. The original resource

remains unchanged.

## Basic Usage

The waiter polls until it is successful, until it fails by entering a terminal state, or until a maximum number of attempts have been made.

# polls in a loop until condition is true
resource.wait_until(options) {|resource| condition}

## Example

instance.wait_until(max_attempts:10, delay:5) do |instance|
  instance.state.name == 'running'
end

## Configuration

You can configure the maximum number of polling attempts, and the delay (in seconds) between each polling attempt. The waiting condition is set by passing a block to {#wait_until}:

# poll for ~25 seconds
resource.wait_until(max_attempts:5,delay:5) {|resource|...}

## Callbacks

You can be notified before each polling attempt and before each delay. If you throw `:success` or `:failure` from these callbacks, it will terminate the waiter.

started_at = Time.now
# poll for 1 hour, instead of a number of attempts
proc = Proc.new do |attempts, response|
  throw :failure if Time.now - started_at > 3600
end

# disable max attempts
instance.wait_until(before_wait:proc, max_attempts:nil) {...}

## Handling Errors

When a waiter is successful, it returns the Resource. When a waiter fails, it raises an error.

begin
  resource.wait_until(...)
rescue Aws::Waiters::Errors::WaiterFailed
  # resource did not enter the desired state in time
end

@yieldparam [Resource] resource to be used in the waiting condition.

@raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter

terminates because the waiter has entered a state that it will not
transition out of, preventing success.

@raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the

configured maximum number of attempts have been made and the waiter is not
yet successful.

@raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is

encountered while polling for a resource that is not expected.

@raise [NotImplementedError] Raised when the resource does not

support #wait_until.

@option options [Integer] :max_attempts (10) Maximum number of attempts
@option options [Integer] :delay (10) Delay between each attempt in seconds
@option options [Proc] :before_attempt (nil) Callback invoked before each attempt
@option options [Proc] :before_wait (nil) Callback invoked before each wait
@return [Resource] if the waiter was successful

# File lib/aws-sdk-s3/object.rb, line 511
def wait_until(options = {}, &block)
  self_copy = self.dup
  attempts = 0
  options[:max_attempts] = 10 unless options.key?(:max_attempts)
  options[:delay] ||= 10
  options[:poller] = Proc.new do
    attempts += 1
    if block.call(self_copy)
      [:success, self_copy]
    else
      self_copy.reload unless attempts == options[:max_attempts]
      :retry
    end
  end
  Aws::Waiters::Waiter.new(options).wait({})
end
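
A sketch tied to this resource, assuming the `#restore` reader on this class; the attempt count, delay, and restore condition are illustrative: poll until a previously requested Glacier restore has finished.

begin
  obj.wait_until(max_attempts: 120, delay: 30) do |object|
    # restore header reads e.g. 'ongoing-request="false", expiry-date=...'
    object.restore&.include?('ongoing-request="false"')
  end
  puts 'temporary copy is available'
rescue Aws::Waiters::Errors::WaiterFailed
  warn 'restore did not complete in time'
end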
wait_until_exists(options = {}, &block) click to toggle source

@param [Hash] options ({})
@option options [Integer] :max_attempts (20)
@option options [Float] :delay (5)
@option options [Proc] :before_attempt
@option options [Proc] :before_wait
@return [Object]

# File lib/aws-sdk-s3/object.rb, line 399
def wait_until_exists(options = {}, &block)
  options, params = separate_params_and_options(options)
  waiter = Waiters::ObjectExists.new(options)
  yield_waiter_and_warn(waiter, &block) if block_given?
  waiter.wait(params.merge(bucket: @bucket_name,
    key: @key))
  Object.new({
    bucket_name: @bucket_name,
    key: @key,
    client: @client
  })
end
wait_until_not_exists(options = {}, &block) click to toggle source

@param [Hash] options ({})
@option options [Integer] :max_attempts (20)
@option options [Float] :delay (5)
@option options [Proc] :before_attempt
@option options [Proc] :before_wait
@return [Object]

# File lib/aws-sdk-s3/object.rb, line 418
def wait_until_not_exists(options = {}, &block)
  options, params = separate_params_and_options(options)
  waiter = Waiters::ObjectNotExists.new(options)
  yield_waiter_and_warn(waiter, &block) if block_given?
  waiter.wait(params.merge(bucket: @bucket_name,
    key: @key))
  Object.new({
    bucket_name: @bucket_name,
    key: @key,
    client: @client
  })
end
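
A minimal sketch pairing writes and deletes with the typed waiters above; the body and waiter settings are illustrative:

obj.put(body: 'hello')                              # write, then block until visible
obj.wait_until_exists(max_attempts: 10, delay: 2)

obj.delete                                          # delete, then block until gone
obj.wait_until_not_exists(max_attempts: 10, delay: 2)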
website_redirect_location() click to toggle source

If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. @return [String]

# File lib/aws-sdk-s3/object.rb, line 182
def website_redirect_location
  data[:website_redirect_location]
end

Private Instance Methods

extract_bucket_name(args, options) click to toggle source
# File lib/aws-sdk-s3/object.rb, line 1593
def extract_bucket_name(args, options)
  value = args[0] || options.delete(:bucket_name)
  case value
  when String then value
  when nil then raise ArgumentError, "missing required option :bucket_name"
  else
    msg = "expected :bucket_name to be a String, got #{value.class}"
    raise ArgumentError, msg
  end
end
extract_key(args, options) click to toggle source
# File lib/aws-sdk-s3/object.rb, line 1604
def extract_key(args, options)
  value = args[1] || options.delete(:key)
  case value
  when String then value
  when nil then raise ArgumentError, "missing required option :key"
  else
    msg = "expected :key to be a String, got #{value.class}"
    raise ArgumentError, msg
  end
end
separate_params_and_options(options) click to toggle source
# File lib/aws-sdk-s3/object.rb, line 1625
def separate_params_and_options(options)
  opts = Set.new(
    [:client, :max_attempts, :delay, :before_attempt, :before_wait]
  )
  waiter_opts = {}
  waiter_params = {}
  options.each_pair do |key, value|
    if opts.include?(key)
      waiter_opts[key] = value
    else
      waiter_params[key] = value
    end
  end
  waiter_opts[:client] ||= @client
  [waiter_opts, waiter_params]
end
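
This helper is called internally by the waiters above; the sketch below (with assumed keys) only illustrates the split between waiter configuration and pass-through API parameters:

opts, params = separate_params_and_options(
  max_attempts: 5,     # waiter configuration
  delay: 1,            # waiter configuration
  if_match: '"abc123"' # passed through as a HeadObject parameter
)
# opts   => { max_attempts: 5, delay: 1, client: @client }
# params => { if_match: '"abc123"' }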
yield_waiter_and_warn(waiter) { |waiter| ... } click to toggle source
# File lib/aws-sdk-s3/object.rb, line 1615
def yield_waiter_and_warn(waiter, &block)
  if !@waiter_block_warned
    msg = "pass options to configure the waiter; "\
          "yielding the waiter is deprecated"
    warn(msg)
    @waiter_block_warned = true
  end
  yield(waiter.waiter)
end