comparison env/lib/python3.7/site-packages/boto/s3/bucket.py @ 0:26e78fe6e8c4 draft

"planemo upload commit c699937486c35866861690329de38ec1a5d9f783"
author shellac
date Sat, 02 May 2020 07:14:21 -0400
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

import boto
from boto import handler
from boto.resultset import ResultSet
from boto.exception import BotoClientError
from boto.s3.acl import Policy, CannedACLStrings, Grant
from boto.s3.key import Key
from boto.s3.prefix import Prefix
from boto.s3.deletemarker import DeleteMarker
from boto.s3.multipart import MultiPartUpload
from boto.s3.multipart import CompleteMultiPartUpload
from boto.s3.multidelete import MultiDeleteResult
from boto.s3.multidelete import Error
from boto.s3.bucketlistresultset import BucketListResultSet
from boto.s3.bucketlistresultset import VersionedBucketListResultSet
from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
from boto.s3.lifecycle import Lifecycle
from boto.s3.tagging import Tags
from boto.s3.cors import CORSConfiguration
from boto.s3.bucketlogging import BucketLogging
from boto.s3 import website
import boto.jsonresponse
import boto.utils
import xml.sax
import xml.sax.saxutils
import re
import base64
from collections import defaultdict
from boto.compat import BytesIO, six, StringIO, urllib

# as per http://goo.gl/BDuud (02/19/2011)


class S3WebsiteEndpointTranslate(object):

    trans_region = defaultdict(lambda: 's3-website-us-east-1')
    trans_region['eu-west-1'] = 's3-website-eu-west-1'
    trans_region['eu-central-1'] = 's3-website.eu-central-1'
    trans_region['us-west-1'] = 's3-website-us-west-1'
    trans_region['us-west-2'] = 's3-website-us-west-2'
    trans_region['sa-east-1'] = 's3-website-sa-east-1'
    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
    trans_region['ap-southeast-2'] = 's3-website-ap-southeast-2'
    trans_region['cn-north-1'] = 's3-website.cn-north-1'

    @classmethod
    def translate_region(cls, reg):
        return cls.trans_region[reg]

S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']


class Bucket(object):

    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'

    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
       <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
         <Payer>%s</Payer>
       </RequestPaymentConfiguration>"""

    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
       <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
         <Status>%s</Status>
         <MfaDelete>%s</MfaDelete>
       </VersioningConfiguration>"""

    VersionRE = '<Status>([A-Za-z]+)</Status>'
    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'

    def __init__(self, connection=None, name=None, key_class=Key):
        self.name = name
        self.connection = connection
        self.key_class = key_class

    def __repr__(self):
        return '<Bucket: %s>' % self.name

    def __iter__(self):
        return iter(BucketListResultSet(self))

    def __contains__(self, key_name):
        return not (self.get_key(key_name) is None)

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'Name':
            self.name = value
        elif name == 'CreationDate':
            self.creation_date = value
        else:
            setattr(self, name, value)

    def set_key_class(self, key_class):
        """
        Set the Key class associated with this bucket. By default, this
        would be the boto.s3.key.Key class but if you want to subclass
        that for some reason this allows you to associate your new class
        with a bucket so that when you call bucket.new_key() or when you
        get a listing of keys in the bucket you will get an instance of
        your key class rather than the default.

        :type key_class: class
        :param key_class: A subclass of Key that can be more specific
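
        Example (a minimal sketch; ``MyKey`` is a hypothetical
        subclass)::

            class MyKey(Key):
                pass

            bucket.set_key_class(MyKey)
            key = bucket.new_key('obj')   # instance of MyKey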
130 """
131 self.key_class = key_class
132
133 def lookup(self, key_name, headers=None):
134 """
135 Deprecated: Please use get_key method.
136
137 :type key_name: string
138 :param key_name: The name of the key to retrieve
139
140 :rtype: :class:`boto.s3.key.Key`
141 :returns: A Key object from this bucket.
142 """
143 return self.get_key(key_name, headers=headers)
144
145 def get_key(self, key_name, headers=None, version_id=None,
146 response_headers=None, validate=True):
147 """
148 Check to see if a particular key exists within the bucket. This
149 method uses a HEAD request to check for the existence of the key.
150 Returns: An instance of a Key object or None
151
152 :param key_name: The name of the key to retrieve
153 :type key_name: string
154
155 :param headers: The headers to send when retrieving the key
156 :type headers: dict
157
158 :param version_id:
159 :type version_id: string
160
161 :param response_headers: A dictionary containing HTTP
162 headers/values that will override any headers associated
163 with the stored object in the response. See
164 http://goo.gl/EWOPb for details.
165 :type response_headers: dict
166
167 :param validate: Verifies whether the key exists. If ``False``, this
168 will not hit the service, constructing an in-memory object.
169 Default is ``True``.
170 :type validate: bool
171
172 :rtype: :class:`boto.s3.key.Key`
173 :returns: A Key object from this bucket.
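
        Example (a minimal sketch; assumes an existing connection and
        a bucket of your own)::

            conn = boto.connect_s3()
            bucket = conn.get_bucket('mybucket')
            key = bucket.get_key('foo/bar.txt')
            if key is not None:
                print(key.size, key.etag)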
174 """
175 if validate is False:
176 if headers or version_id or response_headers:
177 raise BotoClientError(
178 "When providing 'validate=False', no other params " + \
179 "are allowed."
180 )
181
182 # This leans on the default behavior of ``new_key`` (not hitting
183 # the service). If that changes, that behavior should migrate here.
184 return self.new_key(key_name)
185
186 query_args_l = []
187 if version_id:
188 query_args_l.append('versionId=%s' % version_id)
189 if response_headers:
190 for rk, rv in six.iteritems(response_headers):
191 query_args_l.append('%s=%s' % (rk, urllib.parse.quote(rv)))
192
193 key, resp = self._get_key_internal(key_name, headers, query_args_l)
194 return key
195
196 def _get_key_internal(self, key_name, headers, query_args_l):
197 query_args = '&'.join(query_args_l) or None
198 response = self.connection.make_request('HEAD', self.name, key_name,
199 headers=headers,
200 query_args=query_args)
201 response.read()
202 # Allow any success status (2xx) - for example this lets us
203 # support Range gets, which return status 206:
204 if response.status / 100 == 2:
205 k = self.key_class(self)
206 provider = self.connection.provider
207 k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
208 for field in Key.base_fields:
209 k.__dict__[field.lower().replace('-', '_')] = \
210 response.getheader(field)
211 # the following machinations are a workaround to the fact that
212 # apache/fastcgi omits the content-length header on HEAD
213 # requests when the content-length is zero.
214 # See http://goo.gl/0Tdax for more details.
215 clen = response.getheader('content-length')
216 if clen:
217 k.size = int(response.getheader('content-length'))
218 else:
219 k.size = 0
220 k.name = key_name
221 k.handle_version_headers(response)
222 k.handle_encryption_headers(response)
223 k.handle_restore_headers(response)
224 k.handle_storage_class_header(response)
225 k.handle_addl_headers(response.getheaders())
226 return k, response
227 else:
228 if response.status == 404:
229 return None, response
230 else:
231 raise self.connection.provider.storage_response_error(
232 response.status, response.reason, '')
233
234 def list(self, prefix='', delimiter='', marker='', headers=None,
235 encoding_type=None):
236 """
237 List key objects within a bucket. This returns an instance of an
238 BucketListResultSet that automatically handles all of the result
239 paging, etc. from S3. You just need to keep iterating until
240 there are no more results.
241
242 Called with no arguments, this will return an iterator object across
243 all keys within the bucket.
244
245 The Key objects returned by the iterator are obtained by parsing
246 the results of a GET on the bucket, also known as the List Objects
247 request. The XML returned by this request contains only a subset
248 of the information about each key. Certain metadata fields such
249 as Content-Type and user metadata are not available in the XML.
250 Therefore, if you want these additional metadata fields you will
251 have to do a HEAD request on the Key in the bucket.
252
253 :type prefix: string
254 :param prefix: allows you to limit the listing to a particular
255 prefix. For example, if you call the method with
256 prefix='/foo/' then the iterator will only cycle through
257 the keys that begin with the string '/foo/'.
258
259 :type delimiter: string
260 :param delimiter: can be used in conjunction with the prefix
261 to allow you to organize and browse your keys
262 hierarchically. See http://goo.gl/Xx63h for more details.
263
264 :type marker: string
265 :param marker: The "marker" of where you are in the result set
266
267 :param encoding_type: Requests Amazon S3 to encode the response and
268 specifies the encoding method to use.
269
270 An object key can contain any Unicode character; however, XML 1.0
271 parser cannot parse some characters, such as characters with an
272 ASCII value from 0 to 10. For characters that are not supported in
273 XML 1.0, you can add this parameter to request that Amazon S3
274 encode the keys in the response.
275
276 Valid options: ``url``
277 :type encoding_type: string
278
279 :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
280 :return: an instance of a BucketListResultSet that handles paging, etc
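
        Example (a minimal sketch; the prefix is illustrative)::

            for item in bucket.list(prefix='photos/', delimiter='/'):
                # yields Key objects, plus Prefix objects for the
                # rolled-up "subdirectories" when a delimiter is used
                print(item.name)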
281 """
282 return BucketListResultSet(self, prefix, delimiter, marker, headers,
283 encoding_type=encoding_type)
284
285 def list_versions(self, prefix='', delimiter='', key_marker='',
286 version_id_marker='', headers=None, encoding_type=None):
287 """
288 List version objects within a bucket. This returns an
289 instance of an VersionedBucketListResultSet that automatically
290 handles all of the result paging, etc. from S3. You just need
291 to keep iterating until there are no more results. Called
292 with no arguments, this will return an iterator object across
293 all keys within the bucket.
294
295 :type prefix: string
296 :param prefix: allows you to limit the listing to a particular
297 prefix. For example, if you call the method with
298 prefix='/foo/' then the iterator will only cycle through
299 the keys that begin with the string '/foo/'.
300
301 :type delimiter: string
302 :param delimiter: can be used in conjunction with the prefix
303 to allow you to organize and browse your keys
304 hierarchically. See:
305
306 http://aws.amazon.com/releasenotes/Amazon-S3/213
307
308 for more details.
309
310 :type key_marker: string
311 :param key_marker: The "marker" of where you are in the result set
312
313 :param encoding_type: Requests Amazon S3 to encode the response and
314 specifies the encoding method to use.
315
316 An object key can contain any Unicode character; however, XML 1.0
317 parser cannot parse some characters, such as characters with an
318 ASCII value from 0 to 10. For characters that are not supported in
319 XML 1.0, you can add this parameter to request that Amazon S3
320 encode the keys in the response.
321
322 Valid options: ``url``
323 :type encoding_type: string
324
325 :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
326 :return: an instance of a BucketListResultSet that handles paging, etc
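
        Example (a minimal sketch; assumes versioning is enabled on
        the bucket)::

            for version in bucket.list_versions(prefix='reports/'):
                # yields Key and DeleteMarker objects
                print(version.name, version.version_id)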
327 """
328 return VersionedBucketListResultSet(self, prefix, delimiter,
329 key_marker, version_id_marker,
330 headers,
331 encoding_type=encoding_type)
332
333 def list_multipart_uploads(self, key_marker='',
334 upload_id_marker='',
335 headers=None, encoding_type=None):
336 """
337 List multipart upload objects within a bucket. This returns an
338 instance of an MultiPartUploadListResultSet that automatically
339 handles all of the result paging, etc. from S3. You just need
340 to keep iterating until there are no more results.
341
342 :type key_marker: string
343 :param key_marker: The "marker" of where you are in the result set
344
345 :type upload_id_marker: string
346 :param upload_id_marker: The upload identifier
347
348 :param encoding_type: Requests Amazon S3 to encode the response and
349 specifies the encoding method to use.
350
351 An object key can contain any Unicode character; however, XML 1.0
352 parser cannot parse some characters, such as characters with an
353 ASCII value from 0 to 10. For characters that are not supported in
354 XML 1.0, you can add this parameter to request that Amazon S3
355 encode the keys in the response.
356
357 Valid options: ``url``
358 :type encoding_type: string
359
360 :rtype: :class:`boto.s3.bucketlistresultset.MultiPartUploadListResultSet`
361 :return: an instance of a BucketListResultSet that handles paging, etc
362 """
363 return MultiPartUploadListResultSet(self, key_marker,
364 upload_id_marker,
365 headers,
366 encoding_type=encoding_type)
367
368 def _get_all_query_args(self, params, initial_query_string=''):
369 pairs = []
370
371 if initial_query_string:
372 pairs.append(initial_query_string)
373
374 for key, value in sorted(params.items(), key=lambda x: x[0]):
375 if value is None:
376 continue
377 key = key.replace('_', '-')
378 if key == 'maxkeys':
379 key = 'max-keys'
380 if not isinstance(value, six.string_types + (six.binary_type,)):
381 value = six.text_type(value)
382 if not isinstance(value, six.binary_type):
383 value = value.encode('utf-8')
384 if value:
385 pairs.append(u'%s=%s' % (
386 urllib.parse.quote(key),
387 urllib.parse.quote(value)
388 ))
389
390 return '&'.join(pairs)
391
392 def _get_all(self, element_map, initial_query_string='',
393 headers=None, **params):
394 query_args = self._get_all_query_args(
395 params,
396 initial_query_string=initial_query_string
397 )
398 response = self.connection.make_request('GET', self.name,
399 headers=headers,
400 query_args=query_args)
401 body = response.read()
402 boto.log.debug(body)
403 if response.status == 200:
404 rs = ResultSet(element_map)
405 h = handler.XmlHandler(rs, self)
406 if not isinstance(body, bytes):
407 body = body.encode('utf-8')
408 xml.sax.parseString(body, h)
409 return rs
410 else:
411 raise self.connection.provider.storage_response_error(
412 response.status, response.reason, body)
413
414 def validate_kwarg_names(self, kwargs, names):
415 """
416 Checks that all named arguments are in the specified list of names.
417
418 :type kwargs: dict
419 :param kwargs: Dictionary of kwargs to validate.
420
421 :type names: list
422 :param names: List of possible named arguments.
423 """
424 for kwarg in kwargs:
425 if kwarg not in names:
426 raise TypeError('Invalid argument "%s"!' % kwarg)
427
428 def get_all_keys(self, headers=None, **params):
429 """
430 A lower-level method for listing contents of a bucket. This
431 closely models the actual S3 API and requires you to manually
432 handle the paging of results. For a higher-level method that
433 handles the details of paging for you, you can use the list
434 method.
435
436 :type max_keys: int
437 :param max_keys: The maximum number of keys to retrieve
438
439 :type prefix: string
440 :param prefix: The prefix of the keys you want to retrieve
441
442 :type marker: string
443 :param marker: The "marker" of where you are in the result set
444
445 :type delimiter: string
446 :param delimiter: If this optional, Unicode string parameter
447 is included with your request, then keys that contain the
448 same string between the prefix and the first occurrence of
449 the delimiter will be rolled up into a single result
450 element in the CommonPrefixes collection. These rolled-up
451 keys are not returned elsewhere in the response.
452
453 :param encoding_type: Requests Amazon S3 to encode the response and
454 specifies the encoding method to use.
455
456 An object key can contain any Unicode character; however, XML 1.0
457 parser cannot parse some characters, such as characters with an
458 ASCII value from 0 to 10. For characters that are not supported in
459 XML 1.0, you can add this parameter to request that Amazon S3
460 encode the keys in the response.
461
462 Valid options: ``url``
463 :type encoding_type: string
464
465 :rtype: ResultSet
466 :return: The result from S3 listing the keys requested
467
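
        Example of manual paging (a minimal sketch; ``is_truncated``
        is set on the returned ResultSet)::

            rs = bucket.get_all_keys(max_keys=100)
            while True:
                for key in rs:
                    print(key.name)
                if not rs.is_truncated:
                    break
                # continue after the last key of the previous page
                rs = bucket.get_all_keys(max_keys=100, marker=rs[-1].name)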
468 """
469 self.validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix',
470 'marker', 'delimiter',
471 'encoding_type'])
472 return self._get_all([('Contents', self.key_class),
473 ('CommonPrefixes', Prefix)],
474 '', headers, **params)
475
476 def get_all_versions(self, headers=None, **params):
477 """
478 A lower-level, version-aware method for listing contents of a
479 bucket. This closely models the actual S3 API and requires
480 you to manually handle the paging of results. For a
481 higher-level method that handles the details of paging for
482 you, you can use the list method.
483
484 :type max_keys: int
485 :param max_keys: The maximum number of keys to retrieve
486
487 :type prefix: string
488 :param prefix: The prefix of the keys you want to retrieve
489
490 :type key_marker: string
491 :param key_marker: The "marker" of where you are in the result set
492 with respect to keys.
493
494 :type version_id_marker: string
495 :param version_id_marker: The "marker" of where you are in the result
496 set with respect to version-id's.
497
498 :type delimiter: string
499 :param delimiter: If this optional, Unicode string parameter
500 is included with your request, then keys that contain the
501 same string between the prefix and the first occurrence of
502 the delimiter will be rolled up into a single result
503 element in the CommonPrefixes collection. These rolled-up
504 keys are not returned elsewhere in the response.
505
506 :param encoding_type: Requests Amazon S3 to encode the response and
507 specifies the encoding method to use.
508
509 An object key can contain any Unicode character; however, XML 1.0
510 parser cannot parse some characters, such as characters with an
511 ASCII value from 0 to 10. For characters that are not supported in
512 XML 1.0, you can add this parameter to request that Amazon S3
513 encode the keys in the response.
514
515 Valid options: ``url``
516 :type encoding_type: string
517
518 :rtype: ResultSet
519 :return: The result from S3 listing the keys requested
520 """
521 self.validate_get_all_versions_params(params)
522 return self._get_all([('Version', self.key_class),
523 ('CommonPrefixes', Prefix),
524 ('DeleteMarker', DeleteMarker)],
525 'versions', headers, **params)
526
527 def validate_get_all_versions_params(self, params):
528 """
529 Validate that the parameters passed to get_all_versions are valid.
530 Overridden by subclasses that allow a different set of parameters.
531
532 :type params: dict
533 :param params: Parameters to validate.
534 """
535 self.validate_kwarg_names(
536 params, ['maxkeys', 'max_keys', 'prefix', 'key_marker',
537 'version_id_marker', 'delimiter', 'encoding_type'])
538
539 def get_all_multipart_uploads(self, headers=None, **params):
540 """
541 A lower-level, version-aware method for listing active
542 MultiPart uploads for a bucket. This closely models the
543 actual S3 API and requires you to manually handle the paging
544 of results. For a higher-level method that handles the
545 details of paging for you, you can use the list method.
546
547 :type max_uploads: int
548 :param max_uploads: The maximum number of uploads to retrieve.
549 Default value is 1000.
550
551 :type key_marker: string
552 :param key_marker: Together with upload_id_marker, this
553 parameter specifies the multipart upload after which
554 listing should begin. If upload_id_marker is not
555 specified, only the keys lexicographically greater than
556 the specified key_marker will be included in the list.
557
558 If upload_id_marker is specified, any multipart uploads
559 for a key equal to the key_marker might also be included,
560 provided those multipart uploads have upload IDs
561 lexicographically greater than the specified
562 upload_id_marker.
563
564 :type upload_id_marker: string
565 :param upload_id_marker: Together with key-marker, specifies
566 the multipart upload after which listing should begin. If
567 key_marker is not specified, the upload_id_marker
568 parameter is ignored. Otherwise, any multipart uploads
569 for a key equal to the key_marker might be included in the
570 list only if they have an upload ID lexicographically
571 greater than the specified upload_id_marker.
572
573 :type encoding_type: string
574 :param encoding_type: Requests Amazon S3 to encode the response and
575 specifies the encoding method to use.
576
577 An object key can contain any Unicode character; however, XML 1.0
578 parser cannot parse some characters, such as characters with an
579 ASCII value from 0 to 10. For characters that are not supported in
580 XML 1.0, you can add this parameter to request that Amazon S3
581 encode the keys in the response.
582
583 Valid options: ``url``
584
585 :type delimiter: string
586 :param delimiter: Character you use to group keys.
587 All keys that contain the same string between the prefix, if
588 specified, and the first occurrence of the delimiter after the
589 prefix are grouped under a single result element, CommonPrefixes.
590 If you don't specify the prefix parameter, then the substring
591 starts at the beginning of the key. The keys that are grouped
592 under CommonPrefixes result element are not returned elsewhere
593 in the response.
594
595 :type prefix: string
596 :param prefix: Lists in-progress uploads only for those keys that
597 begin with the specified prefix. You can use prefixes to separate
598 a bucket into different grouping of keys. (You can think of using
599 prefix to make groups in the same way you'd use a folder in a
600 file system.)
601
602 :rtype: ResultSet
603 :return: The result from S3 listing the uploads requested
604
605 """
606 self.validate_kwarg_names(params, ['max_uploads', 'key_marker',
607 'upload_id_marker', 'encoding_type',
608 'delimiter', 'prefix'])
609 return self._get_all([('Upload', MultiPartUpload),
610 ('CommonPrefixes', Prefix)],
611 'uploads', headers, **params)
612
613 def new_key(self, key_name=None):
614 """
615 Creates a new key
616
617 :type key_name: string
618 :param key_name: The name of the key to create
619
620 :rtype: :class:`boto.s3.key.Key` or subclass
621 :returns: An instance of the newly created key object
622 """
623 if not key_name:
624 raise ValueError('Empty key names are not allowed')
625 return self.key_class(self, key_name)
626
627 def generate_url(self, expires_in, method='GET', headers=None,
628 force_http=False, response_headers=None,
629 expires_in_absolute=False):
630 return self.connection.generate_url(expires_in, method, self.name,
631 headers=headers,
632 force_http=force_http,
633 response_headers=response_headers,
634 expires_in_absolute=expires_in_absolute)
635
636 def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
637 """
638 Deletes a set of keys using S3's Multi-object delete API. If a
639 VersionID is specified for that key then that version is removed.
640 Returns a MultiDeleteResult Object, which contains Deleted
641 and Error elements for each key you ask to delete.
642
643 :type keys: list
644 :param keys: A list of either key_names or (key_name, versionid) pairs
645 or a list of Key instances.
646
647 :type quiet: boolean
648 :param quiet: In quiet mode the response includes only keys
649 where the delete operation encountered an error. For a
650 successful deletion, the operation does not return any
651 information about the delete in the response body.
652
653 :type mfa_token: tuple or list of strings
654 :param mfa_token: A tuple or list consisting of the serial
655 number from the MFA device and the current value of the
656 six-digit token associated with the device. This value is
657 required anytime you are deleting versioned objects from a
658 bucket that has the MFADelete option on the bucket.
659
660 :returns: An instance of MultiDeleteResult
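
        Example (a minimal sketch; the key names and the version id
        are illustrative)::

            result = bucket.delete_keys(['a.txt', ('b.txt', 'VERSION_ID')])
            for deleted in result.deleted:
                print('deleted:', deleted.key)
            for error in result.errors:
                print('failed:', error.key, error.code)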
661 """
662 ikeys = iter(keys)
663 result = MultiDeleteResult(self)
664 provider = self.connection.provider
665 query_args = 'delete'
666
667 def delete_keys2(hdrs):
668 hdrs = hdrs or {}
669 data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
670 data += u"<Delete>"
671 if quiet:
672 data += u"<Quiet>true</Quiet>"
673 count = 0
674 while count < 1000:
675 try:
676 key = next(ikeys)
677 except StopIteration:
678 break
679 if isinstance(key, six.string_types):
680 key_name = key
681 version_id = None
682 elif isinstance(key, tuple) and len(key) == 2:
683 key_name, version_id = key
684 elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
685 key_name = key.name
686 version_id = key.version_id
687 else:
688 if isinstance(key, Prefix):
689 key_name = key.name
690 code = 'PrefixSkipped' # Don't delete Prefix
691 else:
692 key_name = repr(key) # try get a string
693 code = 'InvalidArgument' # other unknown type
694 message = 'Invalid. No delete action taken for this object.'
695 error = Error(key_name, code=code, message=message)
696 result.errors.append(error)
697 continue
698 count += 1
699 data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
700 if version_id:
701 data += u"<VersionId>%s</VersionId>" % version_id
702 data += u"</Object>"
703 data += u"</Delete>"
704 if count <= 0:
705 return False # no more
706 data = data.encode('utf-8')
707 fp = BytesIO(data)
708 md5 = boto.utils.compute_md5(fp)
709 hdrs['Content-MD5'] = md5[1]
710 hdrs['Content-Type'] = 'text/xml'
711 if mfa_token:
712 hdrs[provider.mfa_header] = ' '.join(mfa_token)
713 response = self.connection.make_request('POST', self.name,
714 headers=hdrs,
715 query_args=query_args,
716 data=data)
717 body = response.read()
718 if response.status == 200:
719 h = handler.XmlHandler(result, self)
720 if not isinstance(body, bytes):
721 body = body.encode('utf-8')
722 xml.sax.parseString(body, h)
723 return count >= 1000 # more?
724 else:
725 raise provider.storage_response_error(response.status,
726 response.reason,
727 body)
728 while delete_keys2(headers):
729 pass
730 return result
731
732 def delete_key(self, key_name, headers=None, version_id=None,
733 mfa_token=None):
734 """
735 Deletes a key from the bucket. If a version_id is provided,
736 only that version of the key will be deleted.
737
738 :type key_name: string
739 :param key_name: The key name to delete
740
741 :type version_id: string
742 :param version_id: The version ID (optional)
743
744 :type mfa_token: tuple or list of strings
745 :param mfa_token: A tuple or list consisting of the serial
746 number from the MFA device and the current value of the
747 six-digit token associated with the device. This value is
748 required anytime you are deleting versioned objects from a
749 bucket that has the MFADelete option on the bucket.
750
751 :rtype: :class:`boto.s3.key.Key` or subclass
752 :returns: A key object holding information on what was
753 deleted. The Caller can see if a delete_marker was
754 created or removed and what version_id the delete created
755 or removed.
756 """
757 if not key_name:
758 raise ValueError('Empty key names are not allowed')
759 return self._delete_key_internal(key_name, headers=headers,
760 version_id=version_id,
761 mfa_token=mfa_token,
762 query_args_l=None)
763
764 def _delete_key_internal(self, key_name, headers=None, version_id=None,
765 mfa_token=None, query_args_l=None):
766 query_args_l = query_args_l or []
767 provider = self.connection.provider
768 if version_id:
769 query_args_l.append('versionId=%s' % version_id)
770 query_args = '&'.join(query_args_l) or None
771 if mfa_token:
772 if not headers:
773 headers = {}
774 headers[provider.mfa_header] = ' '.join(mfa_token)
775 response = self.connection.make_request('DELETE', self.name, key_name,
776 headers=headers,
777 query_args=query_args)
778 body = response.read()
779 if response.status != 204:
780 raise provider.storage_response_error(response.status,
781 response.reason, body)
782 else:
783 # return a key object with information on what was deleted.
784 k = self.key_class(self)
785 k.name = key_name
786 k.handle_version_headers(response)
787 k.handle_addl_headers(response.getheaders())
788 return k
789
790 def copy_key(self, new_key_name, src_bucket_name,
791 src_key_name, metadata=None, src_version_id=None,
792 storage_class='STANDARD', preserve_acl=False,
793 encrypt_key=False, headers=None, query_args=None):
794 """
795 Create a new key in the bucket by copying another existing key.
796
797 :type new_key_name: string
798 :param new_key_name: The name of the new key
799
800 :type src_bucket_name: string
801 :param src_bucket_name: The name of the source bucket
802
803 :type src_key_name: string
804 :param src_key_name: The name of the source key
805
806 :type src_version_id: string
807 :param src_version_id: The version id for the key. This param
808 is optional. If not specified, the newest version of the
809 key will be copied.
810
811 :type metadata: dict
812 :param metadata: Metadata to be associated with new key. If
813 metadata is supplied, it will replace the metadata of the
814 source key being copied. If no metadata is supplied, the
815 source key's metadata will be copied to the new key.
816
817 :type storage_class: string
818 :param storage_class: The storage class of the new key. By
819 default, the new key will use the standard storage class.
820 Possible values are: STANDARD | REDUCED_REDUNDANCY
821
822 :type preserve_acl: bool
823 :param preserve_acl: If True, the ACL from the source key will
824 be copied to the destination key. If False, the
825 destination key will have the default ACL. Note that
826 preserving the ACL in the new key object will require two
827 additional API calls to S3, one to retrieve the current
828 ACL and one to set that ACL on the new object. If you
829 don't care about the ACL, a value of False will be
830 significantly more efficient.
831
832 :type encrypt_key: bool
833 :param encrypt_key: If True, the new copy of the object will
834 be encrypted on the server-side by S3 and will be stored
835 in an encrypted form while at rest in S3.
836
837 :type headers: dict
838 :param headers: A dictionary of header name/value pairs.
839
840 :type query_args: string
841 :param query_args: A string of additional querystring arguments
842 to append to the request
843
844 :rtype: :class:`boto.s3.key.Key` or subclass
845 :returns: An instance of the newly created key object
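
        Example (a minimal sketch; bucket and key names are
        illustrative)::

            new_key = bucket.copy_key('backup/data.csv', 'source-bucket',
                                      'data.csv', preserve_acl=True)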
846 """
847 headers = headers or {}
848 provider = self.connection.provider
849 src_key_name = boto.utils.get_utf8_value(src_key_name)
850 if preserve_acl:
851 if self.name == src_bucket_name:
852 src_bucket = self
853 else:
854 src_bucket = self.connection.get_bucket(
855 src_bucket_name, validate=False, headers=headers)
856 acl = src_bucket.get_xml_acl(src_key_name, headers=headers)
857 if encrypt_key:
858 headers[provider.server_side_encryption_header] = 'AES256'
859 src = '%s/%s' % (src_bucket_name, urllib.parse.quote(src_key_name))
860 if src_version_id:
861 src += '?versionId=%s' % src_version_id
862 headers[provider.copy_source_header] = str(src)
863 # make sure storage_class_header key exists before accessing it
864 if provider.storage_class_header and storage_class:
865 headers[provider.storage_class_header] = storage_class
866 if metadata is not None:
867 headers[provider.metadata_directive_header] = 'REPLACE'
868 headers = boto.utils.merge_meta(headers, metadata, provider)
869 elif not query_args: # Can't use this header with multi-part copy.
870 headers[provider.metadata_directive_header] = 'COPY'
871 response = self.connection.make_request('PUT', self.name, new_key_name,
872 headers=headers,
873 query_args=query_args)
874 body = response.read()
875 if response.status == 200:
876 key = self.new_key(new_key_name)
877 h = handler.XmlHandler(key, self)
878 if not isinstance(body, bytes):
879 body = body.encode('utf-8')
880 xml.sax.parseString(body, h)
881 if hasattr(key, 'Error'):
882 raise provider.storage_copy_error(key.Code, key.Message, body)
883 key.handle_version_headers(response)
884 key.handle_addl_headers(response.getheaders())
885 if preserve_acl:
886 self.set_xml_acl(acl, new_key_name)
887 return key
888 else:
889 raise provider.storage_response_error(response.status,
890 response.reason, body)
891
892 def set_canned_acl(self, acl_str, key_name='', headers=None,
893 version_id=None):
894 assert acl_str in CannedACLStrings
895
896 if headers:
897 headers[self.connection.provider.acl_header] = acl_str
898 else:
899 headers = {self.connection.provider.acl_header: acl_str}
900
901 query_args = 'acl'
902 if version_id:
903 query_args += '&versionId=%s' % version_id
904 response = self.connection.make_request('PUT', self.name, key_name,
905 headers=headers, query_args=query_args)
906 body = response.read()
907 if response.status != 200:
908 raise self.connection.provider.storage_response_error(
909 response.status, response.reason, body)
910
911 def get_xml_acl(self, key_name='', headers=None, version_id=None):
912 query_args = 'acl'
913 if version_id:
914 query_args += '&versionId=%s' % version_id
915 response = self.connection.make_request('GET', self.name, key_name,
916 query_args=query_args,
917 headers=headers)
918 body = response.read()
919 if response.status != 200:
920 raise self.connection.provider.storage_response_error(
921 response.status, response.reason, body)
922 return body
923
924 def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
925 query_args='acl'):
926 if version_id:
927 query_args += '&versionId=%s' % version_id
928 if not isinstance(acl_str, bytes):
929 acl_str = acl_str.encode('utf-8')
930 response = self.connection.make_request('PUT', self.name, key_name,
931 data=acl_str,
932 query_args=query_args,
933 headers=headers)
934 body = response.read()
935 if response.status != 200:
936 raise self.connection.provider.storage_response_error(
937 response.status, response.reason, body)
938
939 def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
940 if isinstance(acl_or_str, Policy):
941 self.set_xml_acl(acl_or_str.to_xml(), key_name,
942 headers, version_id)
943 else:
944 self.set_canned_acl(acl_or_str, key_name,
945 headers, version_id)
946
947 def get_acl(self, key_name='', headers=None, version_id=None):
948 query_args = 'acl'
949 if version_id:
950 query_args += '&versionId=%s' % version_id
951 response = self.connection.make_request('GET', self.name, key_name,
952 query_args=query_args,
953 headers=headers)
954 body = response.read()
955 if response.status == 200:
956 policy = Policy(self)
957 h = handler.XmlHandler(policy, self)
958 if not isinstance(body, bytes):
959 body = body.encode('utf-8')
960 xml.sax.parseString(body, h)
961 return policy
962 else:
963 raise self.connection.provider.storage_response_error(
964 response.status, response.reason, body)
965
966 def set_subresource(self, subresource, value, key_name='', headers=None,
967 version_id=None):
968 """
969 Set a subresource for a bucket or key.
970
971 :type subresource: string
972 :param subresource: The subresource to set.
973
974 :type value: string
975 :param value: The value of the subresource.
976
977 :type key_name: string
978 :param key_name: The key to operate on, or None to operate on the
979 bucket.
980
981 :type headers: dict
982 :param headers: Additional HTTP headers to include in the request.
983
984 :type src_version_id: string
985 :param src_version_id: Optional. The version id of the key to
986 operate on. If not specified, operate on the newest
987 version.
988 """
989 if not subresource:
990 raise TypeError('set_subresource called with subresource=None')
991 query_args = subresource
992 if version_id:
993 query_args += '&versionId=%s' % version_id
994 if not isinstance(value, bytes):
995 value = value.encode('utf-8')
996 response = self.connection.make_request('PUT', self.name, key_name,
997 data=value,
998 query_args=query_args,
999 headers=headers)
1000 body = response.read()
1001 if response.status != 200:
1002 raise self.connection.provider.storage_response_error(
1003 response.status, response.reason, body)
1004
1005 def get_subresource(self, subresource, key_name='', headers=None,
1006 version_id=None):
1007 """
1008 Get a subresource for a bucket or key.
1009
1010 :type subresource: string
1011 :param subresource: The subresource to get.
1012
1013 :type key_name: string
1014 :param key_name: The key to operate on, or None to operate on the
1015 bucket.
1016
1017 :type headers: dict
1018 :param headers: Additional HTTP headers to include in the request.
1019
1020 :type src_version_id: string
1021 :param src_version_id: Optional. The version id of the key to
1022 operate on. If not specified, operate on the newest
1023 version.
1024
1025 :rtype: string
1026 :returns: The value of the subresource.
1027 """
1028 if not subresource:
1029 raise TypeError('get_subresource called with subresource=None')
1030 query_args = subresource
1031 if version_id:
1032 query_args += '&versionId=%s' % version_id
1033 response = self.connection.make_request('GET', self.name, key_name,
1034 query_args=query_args,
1035 headers=headers)
1036 body = response.read()
1037 if response.status != 200:
1038 raise self.connection.provider.storage_response_error(
1039 response.status, response.reason, body)
1040 return body
1041
1042 def make_public(self, recursive=False, headers=None):
1043 self.set_canned_acl('public-read', headers=headers)
1044 if recursive:
1045 for key in self:
1046 self.set_canned_acl('public-read', key.name, headers=headers)
1047
1048 def add_email_grant(self, permission, email_address,
1049 recursive=False, headers=None):
1050 """
1051 Convenience method that provides a quick way to add an email grant
1052 to a bucket. This method retrieves the current ACL, creates a new
1053 grant based on the parameters passed in, adds that grant to the ACL
1054 and then PUT's the new ACL back to S3.
1055
1056 :type permission: string
1057 :param permission: The permission being granted. Should be one of:
1058 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
1059
1060 :type email_address: string
1061 :param email_address: The email address associated with the AWS
1062 account your are granting the permission to.
1063
1064 :type recursive: boolean
1065 :param recursive: A boolean value to controls whether the
1066 command will apply the grant to all keys within the bucket
1067 or not. The default value is False. By passing a True
1068 value, the call will iterate through all keys in the
1069 bucket and apply the same grant to each key. CAUTION: If
1070 you have a lot of keys, this could take a long time!
1071 """
1072 if permission not in S3Permissions:
1073 raise self.connection.provider.storage_permissions_error(
1074 'Unknown Permission: %s' % permission)
1075 policy = self.get_acl(headers=headers)
1076 policy.acl.add_email_grant(permission, email_address)
1077 self.set_acl(policy, headers=headers)
1078 if recursive:
1079 for key in self:
1080 key.add_email_grant(permission, email_address, headers=headers)
1081
1082 def add_user_grant(self, permission, user_id, recursive=False,
1083 headers=None, display_name=None):
1084 """
1085 Convenience method that provides a quick way to add a canonical
1086 user grant to a bucket. This method retrieves the current ACL,
1087 creates a new grant based on the parameters passed in, adds that
1088 grant to the ACL and then PUT's the new ACL back to S3.
1089
1090 :type permission: string
1091 :param permission: The permission being granted. Should be one of:
1092 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
1093
1094 :type user_id: string
1095 :param user_id: The canonical user id associated with the AWS
1096 account your are granting the permission to.
1097
1098 :type recursive: boolean
1099 :param recursive: A boolean value to controls whether the
1100 command will apply the grant to all keys within the bucket
1101 or not. The default value is False. By passing a True
1102 value, the call will iterate through all keys in the
1103 bucket and apply the same grant to each key. CAUTION: If
1104 you have a lot of keys, this could take a long time!
1105
1106 :type display_name: string
1107 :param display_name: An option string containing the user's
1108 Display Name. Only required on Walrus.
1109 """
1110 if permission not in S3Permissions:
1111 raise self.connection.provider.storage_permissions_error(
1112 'Unknown Permission: %s' % permission)
1113 policy = self.get_acl(headers=headers)
1114 policy.acl.add_user_grant(permission, user_id,
1115 display_name=display_name)
1116 self.set_acl(policy, headers=headers)
1117 if recursive:
1118 for key in self:
1119 key.add_user_grant(permission, user_id, headers=headers,
1120 display_name=display_name)
1121
1122 def list_grants(self, headers=None):
1123 policy = self.get_acl(headers=headers)
1124 return policy.acl.grants
1125
1126 def get_location(self, headers=None):
1127 """
1128 Returns the LocationConstraint for the bucket.
1129
1130 :rtype: str
1131 :return: The LocationConstraint for the bucket or the empty
1132 string if no constraint was specified when bucket was created.
1133 """
1134 response = self.connection.make_request('GET', self.name,
1135 headers=headers,
1136 query_args='location')
1137 body = response.read()
1138 if response.status == 200:
1139 rs = ResultSet(self)
1140 h = handler.XmlHandler(rs, self)
1141 if not isinstance(body, bytes):
1142 body = body.encode('utf-8')
1143 xml.sax.parseString(body, h)
1144 return rs.LocationConstraint
1145 else:
1146 raise self.connection.provider.storage_response_error(
1147 response.status, response.reason, body)
1148
1149 def set_xml_logging(self, logging_str, headers=None):
1150 """
1151 Set logging on a bucket directly to the given xml string.
1152
1153 :type logging_str: unicode string
1154 :param logging_str: The XML for the bucketloggingstatus which
1155 will be set. The string will be converted to utf-8 before
1156 it is sent. Usually, you will obtain this XML from the
1157 BucketLogging object.
1158
1159 :rtype: bool
1160 :return: True if ok or raises an exception.
1161 """
1162 body = logging_str
1163 if not isinstance(body, bytes):
1164 body = body.encode('utf-8')
1165 response = self.connection.make_request('PUT', self.name, data=body,
1166 query_args='logging', headers=headers)
1167 body = response.read()
1168 if response.status == 200:
1169 return True
1170 else:
1171 raise self.connection.provider.storage_response_error(
1172 response.status, response.reason, body)
1173
1174 def enable_logging(self, target_bucket, target_prefix='',
1175 grants=None, headers=None):
1176 """
1177 Enable logging on a bucket.
1178
1179 :type target_bucket: bucket or string
1180 :param target_bucket: The bucket to log to.
1181
1182 :type target_prefix: string
1183 :param target_prefix: The prefix which should be prepended to the
1184 generated log files written to the target_bucket.
1185
1186 :type grants: list of Grant objects
1187 :param grants: A list of extra permissions which will be granted on
1188 the log files which are created.
1189
1190 :rtype: bool
1191 :return: True if ok or raises an exception.
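
        Example (a minimal sketch; assumes an existing connection
        ``conn`` and a separate log bucket)::

            log_bucket = conn.get_bucket('my-log-bucket')
            log_bucket.set_as_logging_target()
            bucket.enable_logging(log_bucket, target_prefix='access-logs/')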
1192 """
1193 if isinstance(target_bucket, Bucket):
1194 target_bucket = target_bucket.name
1195 blogging = BucketLogging(target=target_bucket, prefix=target_prefix,
1196 grants=grants)
1197 return self.set_xml_logging(blogging.to_xml(), headers=headers)
1198
1199 def disable_logging(self, headers=None):
1200 """
1201 Disable logging on a bucket.
1202
1203 :rtype: bool
1204 :return: True if ok or raises an exception.
1205 """
1206 blogging = BucketLogging()
1207 return self.set_xml_logging(blogging.to_xml(), headers=headers)
1208
1209 def get_logging_status(self, headers=None):
1210 """
1211 Get the logging status for this bucket.
1212
1213 :rtype: :class:`boto.s3.bucketlogging.BucketLogging`
1214 :return: A BucketLogging object for this bucket.
1215 """
1216 response = self.connection.make_request('GET', self.name,
1217 query_args='logging', headers=headers)
1218 body = response.read()
1219 if response.status == 200:
1220 blogging = BucketLogging()
1221 h = handler.XmlHandler(blogging, self)
1222 if not isinstance(body, bytes):
1223 body = body.encode('utf-8')
1224 xml.sax.parseString(body, h)
1225 return blogging
1226 else:
1227 raise self.connection.provider.storage_response_error(
1228 response.status, response.reason, body)
1229
1230 def set_as_logging_target(self, headers=None):
1231 """
1232 Setup the current bucket as a logging target by granting the necessary
1233 permissions to the LogDelivery group to write log files to this bucket.
1234 """
1235 policy = self.get_acl(headers=headers)
1236 g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
1237 g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
1238 policy.acl.add_grant(g1)
1239 policy.acl.add_grant(g2)
1240 self.set_acl(policy, headers=headers)
1241
1242 def get_request_payment(self, headers=None):
1243 response = self.connection.make_request('GET', self.name,
1244 query_args='requestPayment', headers=headers)
1245 body = response.read()
1246 if response.status == 200:
1247 return body
1248 else:
1249 raise self.connection.provider.storage_response_error(
1250 response.status, response.reason, body)
1251
1252 def set_request_payment(self, payer='BucketOwner', headers=None):
1253 body = self.BucketPaymentBody % payer
1254 response = self.connection.make_request('PUT', self.name, data=body,
1255 query_args='requestPayment', headers=headers)
1256 body = response.read()
1257 if response.status == 200:
1258 return True
1259 else:
1260 raise self.connection.provider.storage_response_error(
1261 response.status, response.reason, body)
1262
1263 def configure_versioning(self, versioning, mfa_delete=False,
1264 mfa_token=None, headers=None):
1265 """
1266 Configure versioning for this bucket.
1267
1268 ..note:: This feature is currently in beta.
1269
1270 :type versioning: bool
1271 :param versioning: A boolean indicating whether version is
1272 enabled (True) or disabled (False).
1273
1274 :type mfa_delete: bool
1275 :param mfa_delete: A boolean indicating whether the
1276 Multi-Factor Authentication Delete feature is enabled
1277 (True) or disabled (False). If mfa_delete is enabled then
1278 all Delete operations will require the token from your MFA
1279 device to be passed in the request.
1280
1281 :type mfa_token: tuple or list of strings
1282 :param mfa_token: A tuple or list consisting of the serial
1283 number from the MFA device and the current value of the
1284 six-digit token associated with the device. This value is
1285 required when you are changing the status of the MfaDelete
1286 property of the bucket.
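
        Example (a minimal sketch; the serial number and token are
        placeholders)::

            bucket.configure_versioning(True)
            # suspending versioning on an MFA-Delete protected bucket:
            bucket.configure_versioning(False,
                                        mfa_token=('MFA_SERIAL', '123456'))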
1287 """
1288 if versioning:
1289 ver = 'Enabled'
1290 else:
1291 ver = 'Suspended'
1292 if mfa_delete:
1293 mfa = 'Enabled'
1294 else:
1295 mfa = 'Disabled'
1296 body = self.VersioningBody % (ver, mfa)
1297 if mfa_token:
1298 if not headers:
1299 headers = {}
1300 provider = self.connection.provider
1301 headers[provider.mfa_header] = ' '.join(mfa_token)
1302 response = self.connection.make_request('PUT', self.name, data=body,
1303 query_args='versioning', headers=headers)
1304 body = response.read()
1305 if response.status == 200:
1306 return True
1307 else:
1308 raise self.connection.provider.storage_response_error(
1309 response.status, response.reason, body)
1310
1311 def get_versioning_status(self, headers=None):
1312 """
1313 Returns the current status of versioning on the bucket.
1314
1315 :rtype: dict
1316 :returns: A dictionary containing a key named 'Versioning'
1317 that can have a value of either Enabled, Disabled, or
1318 Suspended. Also, if MFADelete has ever been enabled on the
1319 bucket, the dictionary will contain a key named
1320 'MFADelete' which will have a value of either Enabled or
1321 Suspended.
1322 """
1323 response = self.connection.make_request('GET', self.name,
1324 query_args='versioning', headers=headers)
1325 body = response.read()
1326 if not isinstance(body, six.string_types):
1327 body = body.decode('utf-8')
1328 boto.log.debug(body)
1329 if response.status == 200:
1330 d = {}
1331 ver = re.search(self.VersionRE, body)
1332 if ver:
1333 d['Versioning'] = ver.group(1)
1334 mfa = re.search(self.MFADeleteRE, body)
1335 if mfa:
1336 d['MfaDelete'] = mfa.group(1)
1337 return d
1338 else:
1339 raise self.connection.provider.storage_response_error(
1340 response.status, response.reason, body)
1341
1342 def configure_lifecycle(self, lifecycle_config, headers=None):
1343 """
1344 Configure lifecycle for this bucket.
1345
1346 :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
1347 :param lifecycle_config: The lifecycle configuration you want
1348 to configure for this bucket.
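
        Example (a minimal sketch; a rule expiring keys under
        ``logs/`` after 30 days)::

            from boto.s3.lifecycle import Lifecycle
            lifecycle = Lifecycle()
            lifecycle.add_rule('expire-logs', 'logs/', 'Enabled', 30)
            bucket.configure_lifecycle(lifecycle)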
1349 """
1350 xml = lifecycle_config.to_xml()
1351 #xml = xml.encode('utf-8')
1352 fp = StringIO(xml)
1353 md5 = boto.utils.compute_md5(fp)
1354 if headers is None:
1355 headers = {}
1356 headers['Content-MD5'] = md5[1]
1357 headers['Content-Type'] = 'text/xml'
1358 response = self.connection.make_request('PUT', self.name,
1359 data=fp.getvalue(),
1360 query_args='lifecycle',
1361 headers=headers)
1362 body = response.read()
1363 if response.status == 200:
1364 return True
1365 else:
1366 raise self.connection.provider.storage_response_error(
1367 response.status, response.reason, body)
1368
1369 def get_lifecycle_config(self, headers=None):
1370 """
1371 Returns the current lifecycle configuration on the bucket.
1372
1373 :rtype: :class:`boto.s3.lifecycle.Lifecycle`
1374 :returns: A LifecycleConfig object that describes all current
1375 lifecycle rules in effect for the bucket.
1376 """
1377 response = self.connection.make_request('GET', self.name,
1378 query_args='lifecycle', headers=headers)
1379 body = response.read()
1380 boto.log.debug(body)
1381 if response.status == 200:
1382 lifecycle = Lifecycle()
1383 h = handler.XmlHandler(lifecycle, self)
1384 if not isinstance(body, bytes):
1385 body = body.encode('utf-8')
1386 xml.sax.parseString(body, h)
1387 return lifecycle
1388 else:
1389 raise self.connection.provider.storage_response_error(
1390 response.status, response.reason, body)
1391
1392 def delete_lifecycle_configuration(self, headers=None):
1393 """
1394 Removes all lifecycle configuration from the bucket.
1395 """
1396 response = self.connection.make_request('DELETE', self.name,
1397 query_args='lifecycle',
1398 headers=headers)
1399 body = response.read()
1400 boto.log.debug(body)
1401 if response.status == 204:
1402 return True
1403 else:
1404 raise self.connection.provider.storage_response_error(
1405 response.status, response.reason, body)
1406
1407 def configure_website(self, suffix=None, error_key=None,
1408 redirect_all_requests_to=None,
1409 routing_rules=None,
1410 headers=None):
1411 """
1412 Configure this bucket to act as a website
1413
1414 :type suffix: str
1415 :param suffix: Suffix that is appended to a request that is for a
1416 "directory" on the website endpoint (e.g. if the suffix is
1417 index.html and you make a request to samplebucket/images/
1418 the data that is returned will be for the object with the
1419 key name images/index.html). The suffix must not be empty
1420 and must not include a slash character.
1421
1422 :type error_key: str
1423 :param error_key: The object key name to use when a 4XX class
1424 error occurs. This is optional.
1425
1426 :type redirect_all_requests_to: :class:`boto.s3.website.RedirectLocation`
1427 :param redirect_all_requests_to: Describes the redirect behavior for
1428 every request to this bucket's website endpoint. If this value is
1429 non None, no other values are considered when configuring the
1430 website configuration for the bucket. This is an instance of
1431 ``RedirectLocation``.
1432
1433 :type routing_rules: :class:`boto.s3.website.RoutingRules`
1434 :param routing_rules: Object which specifies conditions
1435 and redirects that apply when the conditions are met.
1436
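
        Example (a minimal sketch)::

            bucket.configure_website(suffix='index.html',
                                     error_key='error.html')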
1437 """
1438 config = website.WebsiteConfiguration(
1439 suffix, error_key, redirect_all_requests_to,
1440 routing_rules)
1441 return self.set_website_configuration(config, headers=headers)
1442
1443 def set_website_configuration(self, config, headers=None):
1444 """
1445 :type config: boto.s3.website.WebsiteConfiguration
1446 :param config: Configuration data
1447 """
1448 return self.set_website_configuration_xml(config.to_xml(),
1449 headers=headers)
1450
1451
1452 def set_website_configuration_xml(self, xml, headers=None):
1453 """Upload xml website configuration"""
1454 response = self.connection.make_request('PUT', self.name, data=xml,
1455 query_args='website',
1456 headers=headers)
1457 body = response.read()
1458 if response.status == 200:
1459 return True
1460 else:
1461 raise self.connection.provider.storage_response_error(
1462 response.status, response.reason, body)
1463
1464 def get_website_configuration(self, headers=None):
1465 """
1466 Returns the current status of website configuration on the bucket.
1467
1468 :rtype: dict
1469 :returns: A dictionary containing a Python representation
1470 of the XML response from S3. The overall structure is:
1471
1472 * WebsiteConfiguration
1473
1474 * IndexDocument
1475
1476 * Suffix : suffix that is appended to request that
1477 is for a "directory" on the website endpoint
1478 * ErrorDocument
1479
1480 * Key : name of object to serve when an error occurs
1481
1482 """
1483 return self.get_website_configuration_with_xml(headers)[0]
1484
1485 def get_website_configuration_obj(self, headers=None):
1486 """Get the website configuration as a
1487 :class:`boto.s3.website.WebsiteConfiguration` object.
1488 """
1489 config_xml = self.get_website_configuration_xml(headers=headers)
1490 config = website.WebsiteConfiguration()
1491 h = handler.XmlHandler(config, self)
1492 xml.sax.parseString(config_xml, h)
1493 return config
1494
1495 def get_website_configuration_with_xml(self, headers=None):
1496 """
1497 Returns the current status of website configuration on the bucket as
1498 unparsed XML.
1499
1500 :rtype: 2-Tuple
1501 :returns: 2-tuple containing:
1502
1503 1) A dictionary containing a Python representation \
1504 of the XML response. The overall structure is:
1505
1506 * WebsiteConfiguration
1507
1508 * IndexDocument
1509
1510 * Suffix : suffix that is appended to request that \
1511 is for a "directory" on the website endpoint
1512
1513 * ErrorDocument
1514
1515 * Key : name of object to serve when an error occurs
1516
1517
1518 2) unparsed XML describing the bucket's website configuration
1519
1520 """
1521
1522 body = self.get_website_configuration_xml(headers=headers)
1523 e = boto.jsonresponse.Element()
1524 h = boto.jsonresponse.XmlHandler(e, None)
1525 h.parse(body)
1526 return e, body
1527
1528 def get_website_configuration_xml(self, headers=None):
1529 """Get raw website configuration xml"""
1530 response = self.connection.make_request('GET', self.name,
1531 query_args='website', headers=headers)
1532 body = response.read().decode('utf-8')
1533 boto.log.debug(body)
1534
1535 if response.status != 200:
1536 raise self.connection.provider.storage_response_error(
1537 response.status, response.reason, body)
1538 return body
1539
1540 def delete_website_configuration(self, headers=None):
1541 """
1542 Removes all website configuration from the bucket.
1543 """
1544 response = self.connection.make_request('DELETE', self.name,
1545 query_args='website', headers=headers)
1546 body = response.read()
1547 boto.log.debug(body)
1548 if response.status == 204:
1549 return True
1550 else:
1551 raise self.connection.provider.storage_response_error(
1552 response.status, response.reason, body)
1553
1554 def get_website_endpoint(self):
1555 """
1556 Returns the fully qualified hostname to use if you want to access this
1557 bucket as a website. This doesn't validate whether the bucket has
1558 been correctly configured as a website or not.
1559 """
1560 parts = [self.name]
1561 parts.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
1562 parts.append('.'.join(self.connection.host.split('.')[-2:]))
1563 return '.'.join(parts)
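# Example: for a bucket named 'example-bucket' (hypothetical) whose
# location resolves to the default region, this yields something like
# 'example-bucket.s3-website-us-east-1.amazonaws.com':
#
#     url = 'http://%s/' % bucket.get_website_endpoint()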
1564
1565 def get_policy(self, headers=None):
1566 """
1567 Returns the JSON policy associated with the bucket. The policy
1568 is returned as an uninterpreted JSON string.
1569 """
1570 response = self.connection.make_request('GET', self.name,
1571 query_args='policy', headers=headers)
1572 body = response.read()
1573 if response.status == 200:
1574 return body
1575 else:
1576 raise self.connection.provider.storage_response_error(
1577 response.status, response.reason, body)
1578
1579 def set_policy(self, policy, headers=None):
1580 """
1581 Add or replace the JSON policy associated with the bucket.
1582
1583 :type policy: str
1584 :param policy: The JSON policy as a string.
1585 """
1586 response = self.connection.make_request('PUT', self.name,
1587 data=policy,
1588 query_args='policy',
1589 headers=headers)
1590 body = response.read()
1591 if 200 <= response.status <= 204:
1592 return True
1593 else:
1594 raise self.connection.provider.storage_response_error(
1595 response.status, response.reason, body)
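# Sketch of granting public read access via a bucket policy. The bucket
# name is hypothetical; the JSON follows the standard AWS policy grammar:
#
#     import json
#     policy = json.dumps({
#         'Version': '2012-10-17',
#         'Statement': [{
#             'Effect': 'Allow',
#             'Principal': '*',
#             'Action': 's3:GetObject',
#             'Resource': 'arn:aws:s3:::example-bucket/*',
#         }],
#     })
#     bucket.set_policy(policy)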
1596
1597 def delete_policy(self, headers=None):
"""
Delete the JSON policy associated with the bucket.
"""
1598 response = self.connection.make_request('DELETE', self.name,
1599 data='/?policy',
1600 query_args='policy',
1601 headers=headers)
1602 body = response.read()
1603 if 200 <= response.status <= 204:
1604 return True
1605 else:
1606 raise self.connection.provider.storage_response_error(
1607 response.status, response.reason, body)
1608
1609 def set_cors_xml(self, cors_xml, headers=None):
1610 """
1611 Set the CORS (Cross-Origin Resource Sharing) for a bucket.
1612
1613 :type cors_xml: str
1614 :param cors_xml: The XML document describing your desired
1615 CORS configuration. See the S3 documentation for details
1616 of the exact syntax required.
1617 """
1618 fp = StringIO(cors_xml)
1619 md5 = boto.utils.compute_md5(fp)
1620 if headers is None:
1621 headers = {}
1622 headers['Content-MD5'] = md5[1]
1623 headers['Content-Type'] = 'text/xml'
1624 response = self.connection.make_request('PUT', self.name,
1625 data=fp.getvalue(),
1626 query_args='cors',
1627 headers=headers)
1628 body = response.read()
1629 if response.status == 200:
1630 return True
1631 else:
1632 raise self.connection.provider.storage_response_error(
1633 response.status, response.reason, body)
1634
1635 def set_cors(self, cors_config, headers=None):
1636 """
1637 Set the CORS for this bucket given a boto CORSConfiguration
1638 object.
1639
1640 :type cors_config: :class:`boto.s3.cors.CORSConfiguration`
1641 :param cors_config: The CORS configuration you want
1642 to configure for this bucket.
1643 """
1644 return self.set_cors_xml(cors_config.to_xml(), headers=headers)
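# Sketch using CORSConfiguration.add_rule (the class is imported at the
# top of this module); the rule values are illustrative only:
#
#     cors = CORSConfiguration()
#     cors.add_rule('GET', '*', allowed_header='*', max_age_seconds=3000)
#     bucket.set_cors(cors)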
1645
1646 def get_cors_xml(self, headers=None):
1647 """
1648 Returns the current CORS configuration on the bucket as an
1649 XML document.
1650 """
1651 response = self.connection.make_request('GET', self.name,
1652 query_args='cors', headers=headers)
1653 body = response.read()
1654 boto.log.debug(body)
1655 if response.status == 200:
1656 return body
1657 else:
1658 raise self.connection.provider.storage_response_error(
1659 response.status, response.reason, body)
1660
1661 def get_cors(self, headers=None):
1662 """
1663 Returns the current CORS configuration on the bucket.
1664
1665 :rtype: :class:`boto.s3.cors.CORSConfiguration`
1666 :returns: A CORSConfiguration object that describes all current
1667 CORS rules in effect for the bucket.
1668 """
1669 body = self.get_cors_xml(headers)
1670 cors = CORSConfiguration()
1671 h = handler.XmlHandler(cors, self)
1672 xml.sax.parseString(body, h)
1673 return cors
1674
1675 def delete_cors(self, headers=None):
1676 """
1677 Removes all CORS configuration from the bucket.
1678 """
1679 response = self.connection.make_request('DELETE', self.name,
1680 query_args='cors',
1681 headers=headers)
1682 body = response.read()
1683 boto.log.debug(body)
1684 if response.status == 204:
1685 return True
1686 else:
1687 raise self.connection.provider.storage_response_error(
1688 response.status, response.reason, body)
1689
1690 def initiate_multipart_upload(self, key_name, headers=None,
1691 reduced_redundancy=False,
1692 metadata=None, encrypt_key=False,
1693 policy=None):
1694 """
1695 Start a multipart upload operation.
1696
1697 .. note::
1698
1699 After you initiate a multipart upload and upload one or more
1700 parts, you must either complete or abort the multipart upload in
1701 order to stop being charged for storage of the uploaded parts.
1702 Only after you complete or abort the upload does Amazon S3 free
1703 the parts storage and stop charging you for it.
1705
1706 :type key_name: string
1707 :param key_name: The name of the key that will ultimately
1708 result from this multipart upload operation. This is exactly
1709 how the key will appear in the bucket after the upload has
1710 completed.
1711
1712 :type headers: dict
1713 :param headers: Additional HTTP headers to send and store with the
1714 resulting key in S3.
1715
1716 :type reduced_redundancy: boolean
1717 :param reduced_redundancy: In multipart uploads, the storage
1718 class is specified when initiating the upload, not when
1719 uploading individual parts. So if you want the resulting
1720 key to use the reduced redundancy storage class set this
1721 flag when you initiate the upload.
1722
1723 :type metadata: dict
1724 :param metadata: Any metadata that you would like to set on the key
1725 that results from the multipart upload.
1726
1727 :type encrypt_key: bool
1728 :param encrypt_key: If True, the resulting object will be
1729 encrypted on the server side by S3 and will be stored in an
1730 encrypted form while at rest in S3.
1731
1732 :type policy: :class:`boto.s3.acl.CannedACLStrings`
1733 :param policy: A canned ACL policy that will be applied to the
1734 new key (once completed) in S3.
1735 """
1736 query_args = 'uploads'
1737 provider = self.connection.provider
1738 headers = headers or {}
1739 if policy:
1740 headers[provider.acl_header] = policy
1741 if reduced_redundancy:
1742 storage_class_header = provider.storage_class_header
1743 if storage_class_header:
1744 headers[storage_class_header] = 'REDUCED_REDUNDANCY'
1745 # TODO: what if the provider doesn't support reduced redundancy?
1746 # (see boto.s3.key.Key.set_contents_from_file)
1747 if encrypt_key:
1748 headers[provider.server_side_encryption_header] = 'AES256'
1749 if metadata is None:
1750 metadata = {}
1751
1752 headers = boto.utils.merge_meta(headers, metadata,
1753 self.connection.provider)
1754 response = self.connection.make_request('POST', self.name, key_name,
1755 query_args=query_args,
1756 headers=headers)
1757 body = response.read()
1758 boto.log.debug(body)
1759 if response.status == 200:
1760 resp = MultiPartUpload(self)
1761 h = handler.XmlHandler(resp, self)
1762 if not isinstance(body, bytes):
1763 body = body.encode('utf-8')
1764 xml.sax.parseString(body, h)
1765 return resp
1766 else:
1767 raise self.connection.provider.storage_response_error(
1768 response.status, response.reason, body)
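# End-to-end sketch of a multipart upload; key and file names are
# hypothetical. Note that every part except the last must be at least
# 5 MB:
#
#     mp = bucket.initiate_multipart_upload('big-object')
#     with open('part-1.bin', 'rb') as fp:
#         mp.upload_part_from_file(fp, part_num=1)
#     with open('part-2.bin', 'rb') as fp:
#         mp.upload_part_from_file(fp, part_num=2)
#     mp.complete_upload()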
1769
1770 def complete_multipart_upload(self, key_name, upload_id,
1771 xml_body, headers=None):
1772 """
1773 Complete a multipart upload operation.
1774 """
1775 query_args = 'uploadId=%s' % upload_id
1776 if headers is None:
1777 headers = {}
1778 headers['Content-Type'] = 'text/xml'
1779 response = self.connection.make_request('POST', self.name, key_name,
1780 query_args=query_args,
1781 headers=headers, data=xml_body)
1782 contains_error = False
1783 body = response.read().decode('utf-8')
1784 # Some errors will be reported in the body of the response
1785 # even though the HTTP response code is 200. This check
1786 # does a quick and dirty peek in the body for an error element.
1787 if body.find('<Error>') > 0:
1788 contains_error = True
1789 boto.log.debug(body)
1790 if response.status == 200 and not contains_error:
1791 resp = CompleteMultiPartUpload(self)
1792 h = handler.XmlHandler(resp, self)
1793 if not isinstance(body, bytes):
1794 body = body.encode('utf-8')
1795 xml.sax.parseString(body, h)
1796 # Use a dummy key to parse various response headers
1797 # for versioning, encryption info and then explicitly
1798 # set the completed MPU object values from key.
1799 k = self.key_class(self)
1800 k.handle_version_headers(response)
1801 k.handle_encryption_headers(response)
1802 resp.version_id = k.version_id
1803 resp.encrypted = k.encrypted
1804 return resp
1805 else:
1806 raise self.connection.provider.storage_response_error(
1807 response.status, response.reason, body)
1808
1809 def cancel_multipart_upload(self, key_name, upload_id, headers=None):
1810 """
1811 Abort a multipart upload, discarding any parts that have already
1812 been uploaded. To verify that all parts have been removed (so you
1813 don't get charged for the part storage), call the List Parts
operation and ensure the parts list is empty.
1814 """
1815 query_args = 'uploadId=%s' % upload_id
1816 response = self.connection.make_request('DELETE', self.name, key_name,
1817 query_args=query_args,
1818 headers=headers)
1819 body = response.read()
1820 boto.log.debug(body)
1821 if response.status != 204:
1822 raise self.connection.provider.storage_response_error(
1823 response.status, response.reason, body)
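# Sketch of aborting an in-progress upload and then confirming it no
# longer appears in the bucket's upload listing (names are hypothetical):
#
#     mp = bucket.initiate_multipart_upload('abandoned-object')
#     bucket.cancel_multipart_upload(mp.key_name, mp.id)
#     assert mp.id not in [u.id for u in bucket.get_all_multipart_uploads()]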
1824
1825 def delete(self, headers=None):
"""
Delete this bucket. The bucket must be empty for the call to succeed.
"""
1826 return self.connection.delete_bucket(self.name, headers=headers)
1827
1828 def get_tags(self, headers=None):
"""
Return the bucket's tagging configuration as a
:class:`boto.s3.tagging.Tags` object.
"""
1829 response = self.get_xml_tags(headers)
1830 tags = Tags()
1831 h = handler.XmlHandler(tags, self)
1832 if not isinstance(response, bytes):
1833 response = response.encode('utf-8')
1834 xml.sax.parseString(response, h)
1835 return tags
1836
1837 def get_xml_tags(self, headers=None):
"""
Return the bucket's tagging configuration as raw XML.
"""
1838 response = self.connection.make_request('GET', self.name,
1839 query_args='tagging',
1840 headers=headers)
1841 body = response.read()
1842 if response.status == 200:
1843 return body
1844 else:
1845 raise self.connection.provider.storage_response_error(
1846 response.status, response.reason, body)
1847
1848 def set_xml_tags(self, tag_str, headers=None, query_args='tagging'):
"""
Set the bucket's tagging configuration from an XML document string.
"""
1849 if headers is None:
1850 headers = {}
1851 md5 = boto.utils.compute_md5(StringIO(tag_str))
1852 headers['Content-MD5'] = md5[1]
1853 headers['Content-Type'] = 'text/xml'
1854 if not isinstance(tag_str, bytes):
1855 tag_str = tag_str.encode('utf-8')
1856 response = self.connection.make_request('PUT', self.name,
1857 data=tag_str,
1858 query_args=query_args,
1859 headers=headers)
1860 body = response.read()
1861 if response.status != 204:
1862 raise self.connection.provider.storage_response_error(
1863 response.status, response.reason, body)
1864 return True
1865
1866 def set_tags(self, tags, headers=None):
"""
Set the bucket's tags from a :class:`boto.s3.tagging.Tags` object.
"""
1867 return self.set_xml_tags(tags.to_xml(), headers=headers)
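# Sketch of building and applying a tag set with boto.s3.tagging; the
# tag key and value are illustrative:
#
#     from boto.s3.tagging import Tags, TagSet
#     tag_set = TagSet()
#     tag_set.add_tag('environment', 'production')
#     tags = Tags()
#     tags.add_tag_set(tag_set)
#     bucket.set_tags(tags)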
1868
1869 def delete_tags(self, headers=None):
"""
Remove all tags from the bucket.
"""
1870 response = self.connection.make_request('DELETE', self.name,
1871 query_args='tagging',
1872 headers=headers)
1873 body = response.read()
1874 boto.log.debug(body)
1875 if response.status == 204:
1876 return True
1877 else:
1878 raise self.connection.provider.storage_response_error(
1879 response.status, response.reason, body)