diff --git a/localstack/aws/api/s3/__init__.py b/localstack/aws/api/s3/__init__.py index c10c9ce575ff2..ccfce15204b9c 100644 --- a/localstack/aws/api/s3/__init__.py +++ b/localstack/aws/api/s3/__init__.py @@ -831,6 +831,21 @@ class InvalidPart(ServiceException): PartNumber: Optional[PartNumber] +class NoSuchTagSet(ServiceException): + code: str = "NoSuchTagSet" + sender_fault: bool = False + status_code: int = 404 + BucketName: Optional[BucketName] + + +class InvalidTag(ServiceException): + code: str = "InvalidTag" + sender_fault: bool = False + status_code: int = 400 + TagKey: Optional[ObjectKey] + TagValue: Optional[Value] + + AbortDate = datetime diff --git a/localstack/aws/spec-patches.json b/localstack/aws/spec-patches.json index 8cbcc01c35efe..e7f4779d9d7aa 100644 --- a/localstack/aws/spec-patches.json +++ b/localstack/aws/spec-patches.json @@ -992,6 +992,45 @@ "documentation": "

One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.

", "exception": true } + }, + { + "op": "add", + "path": "/shapes/NoSuchTagSet", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "

There is no tag set associated with the bucket.

", + "exception": true + } + }, + { + "op": "add", + "path": "/operations/PutBucketTagging/http/responseCode", + "value": 204 + }, + { + "op": "add", + "path": "/shapes/InvalidTag", + "value": { + "type": "structure", + "members": { + "TagKey": { + "shape": "ObjectKey" + }, + "TagValue": { + "shape": "Value" + } + }, + "documentation": "

The tag provided was not a valid tag. This error can occur if the tag did not pass input validation.

", + "exception": true + } } ] } diff --git a/localstack/services/s3/utils.py b/localstack/services/s3/utils.py index 7bb19eae51d2e..6d8384f94b7e4 100644 --- a/localstack/services/s3/utils.py +++ b/localstack/services/s3/utils.py @@ -11,7 +11,7 @@ import moto.s3.models as moto_s3_models from botocore.exceptions import ClientError from botocore.utils import InvalidArnException -from moto.s3.exceptions import MissingBucket +from moto.s3.exceptions import MalformedXML, MissingBucket from moto.s3.models import FakeBucket, FakeDeleteMarker, FakeKey from moto.s3.utils import clean_key_name @@ -23,6 +23,7 @@ CopySource, InvalidArgument, InvalidRange, + InvalidTag, LifecycleExpiration, LifecycleRule, LifecycleRules, @@ -34,6 +35,8 @@ ObjectVersionId, Owner, SSEKMSKeyId, + TaggingHeader, + TagSet, ) from localstack.aws.connect import connect_to from localstack.services.s3.constants import ( @@ -62,6 +65,8 @@ REGION_REGEX = r"[a-z]{2}-[a-z]+-[0-9]{1,}" PORT_REGEX = r"(:[\d]{0,6})?" +TAG_REGEX = re.compile(r"^[\w\s.:/=+\-@]*$") + S3_VIRTUAL_HOSTNAME_REGEX = ( # path based refs have at least valid bucket expression (separated by .) followed by .s3 r"^(http(s)?://)?((?!s3\.)[^\./]+)\." 
# the negative lookahead part is for considering buckets r"(((s3(-website)?\.({}\.)?)localhost(\.localstack\.cloud)?)|(localhost\.localstack\.cloud)|" @@ -72,10 +77,6 @@ ) _s3_virtual_host_regex = re.compile(S3_VIRTUAL_HOSTNAME_REGEX) -PATTERN_UUID = re.compile( - r"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" -) - RFC1123 = "%a, %d %b %Y %H:%M:%S GMT" @@ -690,3 +691,69 @@ def validate_dict_fields(data: dict, required_fields: set, optional_fields: set) return (set_fields := set(data)) >= required_fields and set_fields <= ( required_fields | optional_fields ) + + +def parse_tagging_header(tagging_header: TaggingHeader) -> dict: + try: + parsed_tags = urlparser.parse_qs(tagging_header, keep_blank_values=True) + tags: dict[str, str] = {} + for key, val in parsed_tags.items(): + if len(val) != 1 or not TAG_REGEX.match(key) or not TAG_REGEX.match(val[0]): + raise InvalidArgument( + "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates.", + ArgumentName="x-amz-tagging", + ArgumentValue=tagging_header, + ) + elif key.startswith("aws:"): + raise InvalidTag("System tags cannot be added/updated by requester", TagKey=key) + tags[key] = val[0] + return tags + + except ValueError: + raise InvalidArgument( + "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates.", + ArgumentName="x-amz-tagging", + ArgumentValue=tagging_header, + ) + + +def validate_tag_set(tag_set: TagSet, type_set: Literal["bucket", "object"] = "bucket"): + keys = set() + for tag in tag_set: + if set(tag) != {"Key", "Value"}: + raise MalformedXML() + + key = tag["Key"] + if key in keys: + raise InvalidTag( + "Cannot provide multiple Tags with the same key", + TagKey=key, + ) + + if key.startswith("aws:"): + if type_set == "bucket": + message = "System tags cannot be added/updated by requester" + else: + message = "Your TagKey cannot be prefixed with aws:" + raise InvalidTag( + message, + TagKey=key, + ) + + if not 
TAG_REGEX.match(key): + raise InvalidTag( + "The TagKey you have provided is invalid", + TagKey=key, + ) + elif not TAG_REGEX.match(tag["Value"]): + raise InvalidTag( + "The TagValue you have provided is invalid", TagKey=key, TagValue=tag["Value"] + ) + + keys.add(key) + + +def get_unique_key_id( + bucket: BucketName, object_key: ObjectKey, version_id: ObjectVersionId +) -> str: + return f"{bucket}/{object_key}/{version_id or 'null'}" diff --git a/localstack/services/s3/v3/models.py b/localstack/services/s3/v3/models.py index 032537a456166..9ff22ae1b3e73 100644 --- a/localstack/services/s3/v3/models.py +++ b/localstack/services/s3/v3/models.py @@ -68,6 +68,8 @@ CrossRegionAttribute, LocalAttribute, ) +from localstack.utils.aws import arns +from localstack.utils.tagging import TaggingService # TODO: beware of timestamp data, we need the snapshot to be more precise for S3, with the different types # moto had a lot of issue with it? not sure about our parser/serializer @@ -127,19 +129,23 @@ def __init__( # see https://docs.aws.amazon.com/AmazonS3/latest/API/API_Owner.html self.owner = get_owner_for_account_id(account_id) + self.bucket_arn = arns.s3_bucket_arn(self.name) def get_object( self, key: ObjectKey, version_id: ObjectVersionId = None, - http_method: Literal["GET", "PUT", "HEAD"] = "GET", - ) -> "S3Object": + http_method: Literal["GET", "PUT", "HEAD", "DELETE"] = "GET", + raise_for_delete_marker: bool = True, + ) -> Union["S3Object", "S3DeleteMarker"]: """ :param key: the Object Key :param version_id: optional, the versionId of the object :param http_method: the HTTP method of the original call. This is necessary for the exception if the bucket is versioned or suspended see: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html + :param raise_for_delete_marker: optional, indicates if the method should raise an exception if the found object + is a S3DeleteMarker. 
If False, it can return a S3DeleteMarker :return: :raises NoSuchKey if the object key does not exist at all, or if the object is a DeleteMarker :raises MethodNotAllowed if the object is a DeleteMarker and the operation is not allowed against it @@ -168,7 +174,7 @@ def get_object( Key=key, VersionId=version_id, ) - elif isinstance(s3_object_version, S3DeleteMarker): + elif raise_for_delete_marker and isinstance(s3_object_version, S3DeleteMarker): raise MethodNotAllowed( "The specified method is not allowed against this resource.", Method=http_method, @@ -184,7 +190,7 @@ def get_object( if not s3_object: raise NoSuchKey("The specified key does not exist.", Key=key) - elif isinstance(s3_object, S3DeleteMarker): + elif raise_for_delete_marker and isinstance(s3_object, S3DeleteMarker): raise NoSuchKey( "The specified key does not exist.", Key=key, @@ -573,6 +579,9 @@ class S3Store(BaseStore): global_bucket_map: dict[BucketName, AccountId] = CrossAccountAttribute(default=dict) aws_managed_kms_key_id: SSEKMSKeyId = LocalAttribute(default=str) + # static tagging service instance + TAGS: TaggingService = CrossAccountAttribute(default=TaggingService) + class BucketCorsIndex: def __init__(self): diff --git a/localstack/services/s3/v3/provider.py b/localstack/services/s3/v3/provider.py index 019a1155c9fb3..f9411e7b5e8b3 100644 --- a/localstack/services/s3/v3/provider.py +++ b/localstack/services/s3/v3/provider.py @@ -38,18 +38,21 @@ DeleteMarkerEntry, DeleteObjectOutput, DeleteObjectsOutput, + DeleteObjectTaggingOutput, Delimiter, EncodingType, Error, FetchOwner, GetBucketEncryptionOutput, GetBucketLocationOutput, + GetBucketTaggingOutput, GetBucketVersioningOutput, GetObjectAttributesOutput, GetObjectAttributesParts, GetObjectAttributesRequest, GetObjectOutput, GetObjectRequest, + GetObjectTaggingOutput, HeadBucketOutput, HeadObjectOutput, HeadObjectRequest, @@ -72,6 +75,8 @@ MultipartUpload, MultipartUploadId, NoSuchBucket, + NoSuchKey, + NoSuchTagSet, NoSuchUpload, 
NotificationConfiguration, Object, @@ -86,6 +91,7 @@ Prefix, PutObjectOutput, PutObjectRequest, + PutObjectTaggingOutput, RequestPayer, RestoreObjectOutput, RestoreRequest, @@ -98,6 +104,7 @@ SSECustomerKeyMD5, StartAfter, StorageClass, + Tagging, Token, UploadIdMarker, UploadPartCopyOutput, @@ -125,9 +132,12 @@ get_kms_key_arn, get_owner_for_account_id, get_system_metadata_from_request, + get_unique_key_id, is_bucket_name_valid, parse_range_header, + parse_tagging_header, validate_kms_key_id, + validate_tag_set, ) from localstack.services.s3.v3.models import ( EncryptionParameters, @@ -349,7 +359,6 @@ def put_object( # grant_write_acp: GrantWriteACP = None, # - # request_payer: RequestPayer = None, - # tagging: TaggingHeader = None, # object_lock_mode: ObjectLockMode = None, # object_lock_retain_until_date: ObjectLockRetainUntilDate = None, # object_lock_legal_hold_status: ObjectLockLegalHoldStatus = None, @@ -431,7 +440,12 @@ def put_object( s3_bucket.objects.set(key, s3_object) - # TODO: tags: do we have tagging service or do we manually handle? 
see utils TaggingService + # in case we are overriding an object, delete the tags entry + key_id = get_unique_key_id(bucket_name, key, version_id) + store.TAGS.tags.pop(key_id, None) + if tagging_header := request.get("Tagging"): + tagging = parse_tagging_header(tagging_header) + store.TAGS.tags[key_id] = tagging # TODO: returned fields # RequestCharged: Optional[RequestCharged] # TODO @@ -476,6 +490,7 @@ def get_object( store = self.get_store(context.account_id, context.region) bucket_name = request["Bucket"] object_key = request["Key"] + version_id = request.get("VersionId") if not (s3_bucket := store.buckets.get(bucket_name)): raise NoSuchBucket("The specified bucket does not exist", BucketName=bucket_name) @@ -483,7 +498,7 @@ def get_object( s3_object = s3_bucket.get_object( key=object_key, - version_id=request.get("VersionId"), + version_id=version_id, http_method="GET", ) @@ -526,11 +541,15 @@ def get_object( add_encryption_to_response(response, s3_object=s3_object) + if object_tags := store.TAGS.tags.get( + get_unique_key_id(bucket_name, object_key, version_id) + ): + response["TagCount"] = len(object_tags) + # TODO: missing returned fields # Expiration: Optional[Expiration] # RequestCharged: Optional[RequestCharged] # ReplicationStatus: Optional[ReplicationStatus] - # TagCount: Optional[TagCount] # ObjectLockMode: Optional[ObjectLockMode] # ObjectLockRetainUntilDate: Optional[ObjectLockRetainUntilDate] # ObjectLockLegalHoldStatus: Optional[ObjectLockLegalHoldStatus] @@ -632,6 +651,8 @@ def delete_object( if found_object: self._storage_backend.remove(bucket, found_object) self._notify(context, s3_bucket=s3_bucket, s3_object=found_object) + store.TAGS.tags.pop(get_unique_key_id(bucket, key, version_id), None) + return DeleteObjectOutput() if not version_id: @@ -663,6 +684,7 @@ def delete_object( else: self._storage_backend.remove(bucket, found_object) self._notify(context, s3_bucket=s3_bucket, s3_object=found_object) + 
store.TAGS.tags.pop(get_unique_key_id(bucket, key, version_id), None) return response @@ -714,6 +736,7 @@ def delete_objects( if found_object: to_remove.append(found_object) self._notify(context, s3_bucket=s3_bucket, s3_object=found_object) + store.TAGS.tags.pop(get_unique_key_id(bucket, object_key, version_id), None) # small hack to not create a fake object for nothing elif s3_bucket.notification_configuration: # DeleteObjects is a bit weird, even if the object didn't exist, S3 will trigger a notification @@ -771,6 +794,7 @@ def delete_objects( to_remove.append(found_object) self._notify(context, s3_bucket=s3_bucket, s3_object=found_object) + store.TAGS.tags.pop(get_unique_key_id(bucket, object_key, version_id), None) # TODO: request charged self._storage_backend.remove(bucket, to_remove) @@ -801,10 +825,7 @@ def copy_object( # copy_source_if_none_match: CopySourceIfNoneMatch = None, # copy_source_if_unmodified_since: CopySourceIfUnmodifiedSince = None, # - # tagging_directive: TaggingDirective = None, - # # request_payer: RequestPayer = None, - # tagging: TaggingHeader = None, # object_lock_mode: ObjectLockMode = None, # object_lock_retain_until_date: ObjectLockRetainUntilDate = None, # object_lock_legal_hold_status: ObjectLockLegalHoldStatus = None, @@ -852,6 +873,9 @@ def copy_object( "object's metadata, storage class, website redirect location or encryption attributes." ) + if tagging := request.get("Tagging"): + tagging = parse_tagging_header(tagging) + if metadata_directive == "REPLACE": user_metadata = request.get("Metadata") system_metadata = get_system_metadata_from_request(request) @@ -902,6 +926,13 @@ def copy_object( # TODO: verify this assumption from moto? 
dest_s3_bucket.objects.set(dest_key, s3_object) + dest_key_id = get_unique_key_id(dest_bucket, dest_key, dest_version_id) + if (request.get("TaggingDirective")) == "REPLACE": + store.TAGS.tags[dest_key_id] = tagging or {} + else: + src_key_id = get_unique_key_id(src_bucket, src_key, src_version_id) + store.TAGS.tags[dest_key_id] = copy.copy(store.TAGS.tags.get(src_key_id, {})) + copy_object_result = CopyObjectResult( + ETag=s3_object.quoted_etag, + LastModified=s3_object.last_modified, @@ -1377,7 +1408,6 @@ def create_multipart_upload( # grant_read_acp: GrantReadACP = None, # grant_write_acp: GrantWriteACP = None, # request_payer: RequestPayer = None, - # tagging: TaggingHeader = None, store = self.get_store(context.account_id, context.region) bucket_name = request["Bucket"] if not (s3_bucket := store.buckets.get(bucket_name)): @@ -1393,6 +1423,9 @@ if not config.S3_SKIP_KMS_KEY_VALIDATION and (sse_kms_key_id := request.get("SSEKMSKeyId")): validate_kms_key_id(sse_kms_key_id, s3_bucket) + if tagging := request.get("Tagging"): + tagging = parse_tagging_header(tagging_header=tagging) + key = request["Key"] system_metadata = get_system_metadata_from_request(request) @@ -1431,6 +1464,7 @@ expiration=None, # TODO, from lifecycle, or should it be updated with config? acl=None, initiator=get_owner_for_account_id(context.account_id), + tagging=tagging, ) s3_bucket.multiparts[s3_multipart.id] = s3_multipart @@ -1671,6 +1705,11 @@ def complete_multipart_upload( self._storage_backend.remove_multipart(bucket, s3_multipart) s3_bucket.multiparts.pop(s3_multipart.id, None) + key_id = get_unique_key_id(bucket, key, version_id) + store.TAGS.tags.pop(key_id, None) + if s3_multipart.tagging: + store.TAGS.tags[key_id] = s3_multipart.tagging + # TODO: validate if you provide wrong checksum compared to the given algorithm? should you calculate it anyway # when you complete? sounds weird, not sure how that works? 
@@ -1975,6 +2014,152 @@ def get_bucket_notification_configuration( return s3_bucket.notification_configuration or NotificationConfiguration() + def put_bucket_tagging( + self, + context: RequestContext, + bucket: BucketName, + tagging: Tagging, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + expected_bucket_owner: AccountId = None, + ) -> None: + store = self.get_store(context.account_id, context.region) + if not (s3_bucket := store.buckets.get(bucket)): + raise NoSuchBucket("The specified bucket does not exist", BucketName=bucket) + + if "TagSet" not in tagging: + raise MalformedXML() + + validate_tag_set(tagging["TagSet"], type_set="bucket") + + # remove the previous tags before setting the new ones, it overwrites the whole TagSet + store.TAGS.tags.pop(s3_bucket.bucket_arn, None) + store.TAGS.tag_resource(s3_bucket.bucket_arn, tags=tagging["TagSet"]) + + def get_bucket_tagging( + self, context: RequestContext, bucket: BucketName, expected_bucket_owner: AccountId = None + ) -> GetBucketTaggingOutput: + store = self.get_store(context.account_id, context.region) + if not (s3_bucket := store.buckets.get(bucket)): + raise NoSuchBucket("The specified bucket does not exist", BucketName=bucket) + tag_set = store.TAGS.list_tags_for_resource(s3_bucket.bucket_arn, root_name="Tags")["Tags"] + if not tag_set: + raise NoSuchTagSet( + "The TagSet does not exist", + BucketName=bucket, + ) + + return GetBucketTaggingOutput(TagSet=tag_set) + + def delete_bucket_tagging( + self, context: RequestContext, bucket: BucketName, expected_bucket_owner: AccountId = None + ) -> None: + store = self.get_store(context.account_id, context.region) + if not (s3_bucket := store.buckets.get(bucket)): + raise NoSuchBucket("The specified bucket does not exist", BucketName=bucket) + + store.TAGS.tags.pop(s3_bucket.bucket_arn, None) + + def put_object_tagging( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + tagging: Tagging, + 
version_id: ObjectVersionId = None, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + expected_bucket_owner: AccountId = None, + request_payer: RequestPayer = None, + ) -> PutObjectTaggingOutput: + store = self.get_store(context.account_id, context.region) + if not (s3_bucket := store.buckets.get(bucket)): + raise NoSuchBucket("The specified bucket does not exist", BucketName=bucket) + + s3_object = s3_bucket.get_object( + key=key, + version_id=version_id, + raise_for_delete_marker=False, # We can tag DeleteMarker + ) + + if "TagSet" not in tagging: + raise MalformedXML() + + validate_tag_set(tagging["TagSet"], type_set="object") + + key_id = get_unique_key_id(bucket, key, version_id) + # remove the previous tags before setting the new ones, it overwrites the whole TagSet + store.TAGS.tags.pop(key_id, None) + store.TAGS.tag_resource(key_id, tags=tagging["TagSet"]) + response = PutObjectTaggingOutput() + if s3_object.version_id: + response["VersionId"] = s3_object.version_id + + self._notify(context, s3_bucket=s3_bucket, s3_object=s3_object) + + return response + + def get_object_tagging( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId = None, + expected_bucket_owner: AccountId = None, + request_payer: RequestPayer = None, + ) -> GetObjectTaggingOutput: + store = self.get_store(context.account_id, context.region) + if not (s3_bucket := store.buckets.get(bucket)): + raise NoSuchBucket("The specified bucket does not exist", BucketName=bucket) + + try: + s3_object = s3_bucket.get_object( + key=key, + version_id=version_id, + raise_for_delete_marker=False, # We can tag DeleteMarker + ) + except NoSuchKey as e: + # There is a weird AWS validated bug in S3: the returned key contains the bucket name as well + # follow AWS on this one + e.Key = f"{bucket}/{key}" + raise e + + tag_set = store.TAGS.list_tags_for_resource(get_unique_key_id(bucket, key, version_id))[ + "Tags" + ] + response 
= GetObjectTaggingOutput(TagSet=tag_set) + if s3_object.version_id: + response["VersionId"] = s3_object.version_id + + return response + + def delete_object_tagging( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId = None, + expected_bucket_owner: AccountId = None, + ) -> DeleteObjectTaggingOutput: + store = self.get_store(context.account_id, context.region) + if not (s3_bucket := store.buckets.get(bucket)): + raise NoSuchBucket("The specified bucket does not exist", BucketName=bucket) + + s3_object = s3_bucket.get_object( + key=key, + version_id=version_id, + raise_for_delete_marker=False, + ) + + store.TAGS.tags.pop(get_unique_key_id(bucket, key, version_id), None) + response = DeleteObjectTaggingOutput() + if s3_object.version_id: + response["VersionId"] = s3_object.version_id + + self._notify(context, s3_bucket=s3_bucket, s3_object=s3_object) + + return response + def generate_version_id(bucket_versioning_status: str) -> str | None: if not bucket_versioning_status: diff --git a/tests/aws/s3/test_s3_api.py b/tests/aws/s3/test_s3_api.py index fd0b77d12cbcf..33ec0afde78f6 100644 --- a/tests/aws/s3/test_s3_api.py +++ b/tests/aws/s3/test_s3_api.py @@ -685,3 +685,357 @@ def test_s3_bucket_encryption_sse_kms_aws_managed_key(self, s3_bucket, aws_clien get_object_encrypted = aws_client.s3.get_object(Bucket=s3_bucket, Key=key_name) snapshot.match("get-object-encrypted", get_object_encrypted) + + +@pytest.mark.skipif( + condition=not config.NATIVE_S3_PROVIDER, + reason="These are WIP tests for the new native S3 provider", +) +class TestS3BucketObjectTagging: + @markers.aws.validated + def test_bucket_tagging_crud(self, s3_bucket, aws_client, snapshot): + snapshot.add_transformer(snapshot.transform.key_value("BucketName")) + with pytest.raises(ClientError) as e: + aws_client.s3.get_bucket_tagging(Bucket=s3_bucket) + snapshot.match("get-bucket-tags-empty", e.value.response) + + tag_set = {"TagSet": [{"Key": "tag1", 
"Value": "tag1"}, {"Key": "tag2", "Value": ""}]} + + put_bucket_tags = aws_client.s3.put_bucket_tagging(Bucket=s3_bucket, Tagging=tag_set) + snapshot.match("put-bucket-tags", put_bucket_tags) + + get_bucket_tags = aws_client.s3.get_bucket_tagging(Bucket=s3_bucket) + snapshot.match("get-bucket-tags", get_bucket_tags) + + tag_set_2 = {"TagSet": [{"Key": "tag3", "Value": "tag3"}]} + + put_bucket_tags = aws_client.s3.put_bucket_tagging(Bucket=s3_bucket, Tagging=tag_set_2) + snapshot.match("put-bucket-tags-overwrite", put_bucket_tags) + + get_bucket_tags = aws_client.s3.get_bucket_tagging(Bucket=s3_bucket) + snapshot.match("get-bucket-tags-overwritten", get_bucket_tags) + + delete_bucket_tags = aws_client.s3.delete_bucket_tagging(Bucket=s3_bucket) + snapshot.match("delete-bucket-tags", delete_bucket_tags) + + # test idempotency of delete + aws_client.s3.delete_bucket_tagging(Bucket=s3_bucket) + + with pytest.raises(ClientError) as e: + aws_client.s3.get_bucket_tagging(Bucket=s3_bucket) + e.match("NoSuchTagSet") + + # setting an empty tag set is the same as effectively deleting the TagSet + tag_set_empty = {"TagSet": []} + + put_bucket_tags = aws_client.s3.put_bucket_tagging(Bucket=s3_bucket, Tagging=tag_set_empty) + snapshot.match("put-bucket-tags-empty", put_bucket_tags) + + with pytest.raises(ClientError) as e: + aws_client.s3.get_bucket_tagging(Bucket=s3_bucket) + e.match("NoSuchTagSet") + + @markers.aws.validated + def test_bucket_tagging_exc(self, s3_bucket, aws_client, snapshot): + snapshot.add_transformer(snapshot.transform.key_value("BucketName")) + fake_bucket = f"fake-bucket-{short_uid()}-{short_uid()}" + with pytest.raises(ClientError) as e: + aws_client.s3.get_bucket_tagging(Bucket=fake_bucket) + snapshot.match("get-no-bucket-tags", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.delete_bucket_tagging(Bucket=fake_bucket) + snapshot.match("delete-no-bucket-tags", e.value.response) + + with pytest.raises(ClientError) as e: + 
aws_client.s3.put_bucket_tagging(Bucket=fake_bucket, Tagging={"TagSet": []}) + snapshot.match("put-no-bucket-tags", e.value.response) + + @markers.aws.validated + def test_object_tagging_crud(self, s3_bucket, aws_client, snapshot): + object_key = "test-object-tagging" + put_object = aws_client.s3.put_object(Bucket=s3_bucket, Key=object_key, Body="test-tagging") + snapshot.match("put-object", put_object) + + get_bucket_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-object-tags-empty", get_bucket_tags) + + tag_set = {"TagSet": [{"Key": "tag1", "Value": "tag1"}, {"Key": "tag2", "Value": ""}]} + + put_bucket_tags = aws_client.s3.put_object_tagging( + Bucket=s3_bucket, Key=object_key, Tagging=tag_set + ) + snapshot.match("put-object-tags", put_bucket_tags) + + get_bucket_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-object-tags", get_bucket_tags) + + tag_set_2 = {"TagSet": [{"Key": "tag3", "Value": "tag3"}]} + + put_bucket_tags = aws_client.s3.put_object_tagging( + Bucket=s3_bucket, Key=object_key, Tagging=tag_set_2 + ) + snapshot.match("put-object-tags-overwrite", put_bucket_tags) + + get_bucket_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-object-tags-overwritten", get_bucket_tags) + + get_object = aws_client.s3.get_object(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-obj-after-tags", get_object) + + delete_bucket_tags = aws_client.s3.delete_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("delete-object-tags", delete_bucket_tags) + + get_bucket_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-object-tags-deleted", get_bucket_tags) + + get_object = aws_client.s3.get_object(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-obj-after-tags-deleted", get_object) + + @markers.aws.validated + def test_object_tagging_exc(self, s3_bucket, aws_client, 
snapshot): + snapshot.add_transformer(snapshot.transform.key_value("BucketName")) + snapshot.add_transformer(snapshot.transform.regex(s3_bucket, replacement="")) + fake_bucket = f"fake-bucket-{short_uid()}-{short_uid()}" + fake_key = "fake-key" + with pytest.raises(ClientError) as e: + aws_client.s3.get_object_tagging(Bucket=fake_bucket, Key=fake_key) + snapshot.match("get-no-bucket-tags", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.delete_object_tagging(Bucket=fake_bucket, Key=fake_key) + snapshot.match("delete-no-bucket-tags", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.put_object_tagging( + Bucket=fake_bucket, Tagging={"TagSet": []}, Key=fake_key + ) + snapshot.match("put-no-bucket-tags", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=fake_key) + snapshot.match("get-no-key-tags", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.delete_object_tagging(Bucket=s3_bucket, Key=fake_key) + snapshot.match("delete-no-key-tags", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.put_object_tagging(Bucket=s3_bucket, Tagging={"TagSet": []}, Key=fake_key) + snapshot.match("put-no-key-tags", e.value.response) + + with pytest.raises(ClientError) as e: + tagging = "key1=val1&key1=val2" + aws_client.s3.put_object(Bucket=s3_bucket, Key=fake_key, Body="", Tagging=tagging) + snapshot.match("put-obj-duplicate-tagging", e.value.response) + + with pytest.raises(ClientError) as e: + tagging = "key1=val1,key2=val2" + aws_client.s3.put_object(Bucket=s3_bucket, Key=fake_key, Body="", Tagging=tagging) + snapshot.match("put-obj-wrong-format", e.value.response) + + @markers.aws.validated + def test_object_tagging_versioned(self, s3_bucket, aws_client, snapshot): + snapshot.add_transformer(snapshot.transform.key_value("VersionId")) + aws_client.s3.put_bucket_versioning( + Bucket=s3_bucket, 
VersioningConfiguration={"Status": "Enabled"} + ) + object_key = "test-version-tagging" + version_ids = [] + for i in range(2): + put_obj = aws_client.s3.put_object(Bucket=s3_bucket, Key=object_key, Body=f"test-{i}") + snapshot.match(f"put-obj-{i}", put_obj) + version_ids.append(put_obj["VersionId"]) + + version_id_1, version_id_2 = version_ids + + tag_set_2 = {"TagSet": [{"Key": "tag3", "Value": "tag3"}]} + + # test without specifying a VersionId + put_bucket_tags = aws_client.s3.put_object_tagging( + Bucket=s3_bucket, Key=object_key, Tagging=tag_set_2 + ) + snapshot.match("put-object-tags-current-version", put_bucket_tags) + assert put_bucket_tags["VersionId"] == version_id_2 + + get_bucket_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-object-tags-current-version", get_bucket_tags) + + tag_set_2 = {"TagSet": [{"Key": "tag1", "Value": "tag1"}]} + # test by specifying a VersionId to Version1 + put_bucket_tags = aws_client.s3.put_object_tagging( + Bucket=s3_bucket, Key=object_key, VersionId=version_id_1, Tagging=tag_set_2 + ) + snapshot.match("put-object-tags-previous-version", put_bucket_tags) + assert put_bucket_tags["VersionId"] == version_id_1 + + get_bucket_tags = aws_client.s3.get_object_tagging( + Bucket=s3_bucket, Key=object_key, VersionId=version_id_1 + ) + snapshot.match("get-object-tags-previous-version", get_bucket_tags) + + # Put a DeleteMarker on top of the stack + delete_current = aws_client.s3.delete_object(Bucket=s3_bucket, Key=object_key) + snapshot.match("put-delete-marker", delete_current) + + # test to put/get tagging on a DeleteMarker + put_bucket_tags = aws_client.s3.put_object_tagging( + Bucket=s3_bucket, Key=object_key, VersionId=version_id_1, Tagging=tag_set_2 + ) + snapshot.match("put-object-tags-delete-marker", put_bucket_tags) + + get_bucket_tags = aws_client.s3.get_object_tagging( + Bucket=s3_bucket, Key=object_key, VersionId=version_id_1 + ) + 
snapshot.match("get-object-tags-delete-marker", get_bucket_tags) + + @markers.aws.validated + def test_put_object_with_tags(self, s3_bucket, aws_client, snapshot): + object_key = "test-put-object-tagging" + # tagging must be a URL encoded string directly + tag_set = "tag1=tag1&tag2=tag2&tag=" + put_object = aws_client.s3.put_object( + Bucket=s3_bucket, Key=object_key, Body="test-tagging", Tagging=tag_set + ) + snapshot.match("put-object", put_object) + + get_object_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + # only TagSet set with the query string format are unordered, so not using the SortingTransformer + get_object_tags["TagSet"].sort(key=itemgetter("Key")) + snapshot.match("get-object-tags", get_object_tags) + + tag_set_2 = {"TagSet": [{"Key": "tag3", "Value": "tag3"}]} + put_bucket_tags = aws_client.s3.put_object_tagging( + Bucket=s3_bucket, Key=object_key, Tagging=tag_set_2 + ) + snapshot.match("put-object-tags", put_bucket_tags) + + get_object_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-object-tags-override", get_object_tags) + + head_object = aws_client.s3.head_object(Bucket=s3_bucket, Key=object_key) + snapshot.match("head-obj", head_object) + + get_object = aws_client.s3.get_object(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-obj", get_object) + + tagging = "wrongquery&wrongagain" + aws_client.s3.put_object(Bucket=s3_bucket, Key=object_key, Body="", Tagging=tagging) + + get_object_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + # only TagSet set with the query string format are unordered, so not using the SortingTransformer + get_object_tags["TagSet"].sort(key=itemgetter("Key")) + snapshot.match("get-object-tags-wrong-format-qs", get_object_tags) + + tagging = "key1&&&key2" + aws_client.s3.put_object(Bucket=s3_bucket, Key=object_key, Body="", Tagging=tagging) + + get_object_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, 
Key=object_key) + snapshot.match("get-object-tags-wrong-format-qs-2", get_object_tags) + + @markers.aws.validated + def test_object_tags_delete_or_overwrite_object(self, s3_bucket, aws_client, snapshot): + # verify that tags aren't kept after object deletion + object_key = "test-put-object-tagging-kept" + aws_client.s3.put_object( + Bucket=s3_bucket, Key=object_key, Body="create", Tagging="tag1=val1" + ) + + get_bucket_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-object-after-creation", get_bucket_tags) + + aws_client.s3.put_object(Bucket=s3_bucket, Key=object_key, Body="overwrite") + + get_bucket_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-object-after-overwrite", get_bucket_tags) + + # put some tags to verify they won't be kept + tag_set = {"TagSet": [{"Key": "tag3", "Value": "tag3"}]} + aws_client.s3.put_object_tagging(Bucket=s3_bucket, Key=object_key, Tagging=tag_set) + + aws_client.s3.delete_object(Bucket=s3_bucket, Key=object_key) + aws_client.s3.put_object(Bucket=s3_bucket, Key=object_key, Body="recreate") + + get_bucket_tags = aws_client.s3.get_object_tagging(Bucket=s3_bucket, Key=object_key) + snapshot.match("get-object-after-recreation", get_bucket_tags) + + @markers.aws.validated + def test_tagging_validation(self, s3_bucket, aws_client, snapshot): + object_key = "tagging-validation" + aws_client.s3.put_object(Bucket=s3_bucket, Key=object_key, Body=b"") + + with pytest.raises(ClientError) as e: + aws_client.s3.put_bucket_tagging( + Bucket=s3_bucket, + Tagging={ + "TagSet": [ + {"Key": "Key1", "Value": "Val1"}, + {"Key": "Key1", "Value": "Val1"}, + ] + }, + ) + snapshot.match("put-bucket-tags-duplicate-keys", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.put_bucket_tagging( + Bucket=s3_bucket, + Tagging={ + "TagSet": [ + {"Key": "Key1,Key2", "Value": "Val1"}, + ] + }, + ) + snapshot.match("put-bucket-tags-invalid-key", 
e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.put_bucket_tagging( + Bucket=s3_bucket, + Tagging={ + "TagSet": [ + {"Key": "Key1", "Value": "Val1,Val2"}, + ] + }, + ) + snapshot.match("put-bucket-tags-invalid-value", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.put_bucket_tagging( + Bucket=s3_bucket, + Tagging={ + "TagSet": [ + {"Key": "aws:prefixed", "Value": "Val1"}, + ] + }, + ) + snapshot.match("put-bucket-tags-aws-prefixed", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.put_object_tagging( + Bucket=s3_bucket, + Key=object_key, + Tagging={ + "TagSet": [ + {"Key": "Key1", "Value": "Val1"}, + {"Key": "Key1", "Value": "Val1"}, + ] + }, + ) + + snapshot.match("put-object-tags-duplicate-keys", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.put_object_tagging( + Bucket=s3_bucket, + Key=object_key, + Tagging={"TagSet": [{"Key": "Key1,Key2", "Value": "Val1"}]}, + ) + + snapshot.match("put-object-tags-invalid-field", e.value.response) + + with pytest.raises(ClientError) as e: + aws_client.s3.put_object_tagging( + Bucket=s3_bucket, + Key=object_key, + Tagging={"TagSet": [{"Key": "aws:prefixed", "Value": "Val1"}]}, + ) + snapshot.match("put-object-tags-aws-prefixed", e.value.response) diff --git a/tests/aws/s3/test_s3_api.snapshot.json b/tests/aws/s3/test_s3_api.snapshot.json index 9e0927556f8aa..6c444254b2a82 100644 --- a/tests/aws/s3/test_s3_api.snapshot.json +++ b/tests/aws/s3/test_s3_api.snapshot.json @@ -1491,5 +1491,624 @@ } } } + }, + "tests/integration/s3/test_s3_api.py::TestS3BucketObjectTagging::test_bucket_tagging_crud": { + "recorded-date": "02-08-2023, 22:18:20", + "recorded-content": { + "get-bucket-tags-empty": { + "Error": { + "BucketName": "", + "Code": "NoSuchTagSet", + "Message": "The TagSet does not exist" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + }, + "put-bucket-tags": { + "ResponseMetadata": { + 
"HTTPHeaders": {}, + "HTTPStatusCode": 204 + } + }, + "get-bucket-tags": { + "TagSet": [ + { + "Key": "tag1", + "Value": "tag1" + }, + { + "Key": "tag2", + "Value": "" + } + ], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "put-bucket-tags-overwrite": { + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 204 + } + }, + "get-bucket-tags-overwritten": { + "TagSet": [ + { + "Key": "tag3", + "Value": "tag3" + } + ], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "delete-bucket-tags": { + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 204 + } + }, + "put-bucket-tags-empty": { + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 204 + } + } + } + }, + "tests/integration/s3/test_s3_api.py::TestS3BucketObjectTagging::test_object_tagging_crud": { + "recorded-date": "02-08-2023, 23:23:45", + "recorded-content": { + "put-object": { + "ETag": "\"b635a7fc30aa9091e0d236bee77e6844\"", + "ServerSideEncryption": "AES256", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags-empty": { + "TagSet": [], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "put-object-tags": { + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags": { + "TagSet": [ + { + "Key": "tag1", + "Value": "tag1" + }, + { + "Key": "tag2", + "Value": "" + } + ], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "put-object-tags-overwrite": { + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags-overwritten": { + "TagSet": [ + { + "Key": "tag3", + "Value": "tag3" + } + ], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-obj-after-tags": { + "AcceptRanges": "bytes", + "Body": "test-tagging", + "ContentLength": 12, + "ContentType": "binary/octet-stream", + "ETag": 
"\"b635a7fc30aa9091e0d236bee77e6844\"", + "LastModified": "datetime", + "Metadata": {}, + "ServerSideEncryption": "AES256", + "TagCount": 1, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "delete-object-tags": { + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 204 + } + }, + "get-object-tags-deleted": { + "TagSet": [], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-obj-after-tags-deleted": { + "AcceptRanges": "bytes", + "Body": "test-tagging", + "ContentLength": 12, + "ContentType": "binary/octet-stream", + "ETag": "\"b635a7fc30aa9091e0d236bee77e6844\"", + "LastModified": "datetime", + "Metadata": {}, + "ServerSideEncryption": "AES256", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + } + } + }, + "tests/integration/s3/test_s3_api.py::TestS3BucketObjectTagging::test_put_object_with_tags": { + "recorded-date": "03-08-2023, 01:21:13", + "recorded-content": { + "put-object": { + "ETag": "\"b635a7fc30aa9091e0d236bee77e6844\"", + "ServerSideEncryption": "AES256", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags": { + "TagSet": [ + { + "Key": "tag", + "Value": "" + }, + { + "Key": "tag1", + "Value": "tag1" + }, + { + "Key": "tag2", + "Value": "tag2" + } + ], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "put-object-tags": { + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags-override": { + "TagSet": [ + { + "Key": "tag3", + "Value": "tag3" + } + ], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "head-obj": { + "AcceptRanges": "bytes", + "ContentLength": 12, + "ContentType": "binary/octet-stream", + "ETag": "\"b635a7fc30aa9091e0d236bee77e6844\"", + "LastModified": "datetime", + "Metadata": {}, + "ServerSideEncryption": "AES256", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + 
}, + "get-obj": { + "AcceptRanges": "bytes", + "Body": "test-tagging", + "ContentLength": 12, + "ContentType": "binary/octet-stream", + "ETag": "\"b635a7fc30aa9091e0d236bee77e6844\"", + "LastModified": "datetime", + "Metadata": {}, + "ServerSideEncryption": "AES256", + "TagCount": 1, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags-wrong-format-qs": { + "TagSet": [ + { + "Key": "wrongagain", + "Value": "" + }, + { + "Key": "wrongquery", + "Value": "" + } + ], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags-wrong-format-qs-2": { + "TagSet": [ + { + "Key": "key1", + "Value": "" + }, + { + "Key": "key2", + "Value": "" + } + ], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + } + } + }, + "tests/integration/s3/test_s3_api.py::TestS3BucketObjectTagging::test_bucket_tagging_exc": { + "recorded-date": "02-08-2023, 22:32:41", + "recorded-content": { + "get-no-bucket-tags": { + "Error": { + "BucketName": "", + "Code": "NoSuchBucket", + "Message": "The specified bucket does not exist" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + }, + "delete-no-bucket-tags": { + "Error": { + "BucketName": "", + "Code": "NoSuchBucket", + "Message": "The specified bucket does not exist" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + }, + "put-no-bucket-tags": { + "Error": { + "BucketName": "", + "Code": "NoSuchBucket", + "Message": "The specified bucket does not exist" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + } + } + }, + "tests/integration/s3/test_s3_api.py::TestS3BucketObjectTagging::test_object_tagging_versioned": { + "recorded-date": "02-08-2023, 23:14:16", + "recorded-content": { + "put-obj-0": { + "ETag": "\"86639701cdcc5b39438a5f009bd74cb1\"", + "ServerSideEncryption": "AES256", + "VersionId": "", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 
200 + } + }, + "put-obj-1": { + "ETag": "\"70a37754eb5a2e7db8cd887aaf11cda7\"", + "ServerSideEncryption": "AES256", + "VersionId": "", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "put-object-tags-current-version": { + "VersionId": "", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags-current-version": { + "TagSet": [ + { + "Key": "tag3", + "Value": "tag3" + } + ], + "VersionId": "", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "put-object-tags-previous-version": { + "VersionId": "", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags-previous-version": { + "TagSet": [ + { + "Key": "tag1", + "Value": "tag1" + } + ], + "VersionId": "", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "put-delete-marker": { + "DeleteMarker": true, + "VersionId": "", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 204 + } + }, + "put-object-tags-delete-marker": { + "VersionId": "", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-tags-delete-marker": { + "TagSet": [ + { + "Key": "tag1", + "Value": "tag1" + } + ], + "VersionId": "", + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + } + } + }, + "tests/integration/s3/test_s3_api.py::TestS3BucketObjectTagging::test_object_tagging_exc": { + "recorded-date": "03-08-2023, 00:04:47", + "recorded-content": { + "get-no-bucket-tags": { + "Error": { + "BucketName": "", + "Code": "NoSuchBucket", + "Message": "The specified bucket does not exist" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + }, + "delete-no-bucket-tags": { + "Error": { + "BucketName": "", + "Code": "NoSuchBucket", + "Message": "The specified bucket does not exist" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + }, + "put-no-bucket-tags": { + 
"Error": { + "BucketName": "", + "Code": "NoSuchBucket", + "Message": "The specified bucket does not exist" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + }, + "get-no-key-tags": { + "Error": { + "Code": "NoSuchKey", + "Key": "/fake-key", + "Message": "The specified key does not exist." + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + }, + "delete-no-key-tags": { + "Error": { + "Code": "NoSuchKey", + "Key": "fake-key", + "Message": "The specified key does not exist." + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + }, + "put-no-key-tags": { + "Error": { + "Code": "NoSuchKey", + "Key": "fake-key", + "Message": "The specified key does not exist." + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 404 + } + }, + "put-obj-duplicate-tagging": { + "Error": { + "ArgumentName": "x-amz-tagging", + "ArgumentValue": "key1=val1&key1=val2", + "Code": "InvalidArgument", + "Message": "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates." + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 400 + } + }, + "put-obj-wrong-format": { + "Error": { + "ArgumentName": "x-amz-tagging", + "ArgumentValue": "key1=val1,key2=val2", + "Code": "InvalidArgument", + "Message": "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates." 
+ }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 400 + } + } + } + }, + "tests/integration/s3/test_s3_api.py::TestS3BucketObjectTagging::test_object_tags_delete_or_overwrite_object": { + "recorded-date": "02-08-2023, 23:52:10", + "recorded-content": { + "get-object-after-creation": { + "TagSet": [ + { + "Key": "tag1", + "Value": "val1" + } + ], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-after-overwrite": { + "TagSet": [], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + }, + "get-object-after-recreation": { + "TagSet": [], + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 200 + } + } + } + }, + "tests/integration/s3/test_s3_api.py::TestS3BucketObjectTagging::test_tagging_validation": { + "recorded-date": "03-08-2023, 01:07:47", + "recorded-content": { + "put-bucket-tags-duplicate-keys": { + "Error": { + "Code": "InvalidTag", + "Message": "Cannot provide multiple Tags with the same key", + "TagKey": "Key1" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 400 + } + }, + "put-bucket-tags-invalid-key": { + "Error": { + "Code": "InvalidTag", + "Message": "The TagKey you have provided is invalid", + "TagKey": "Key1,Key2" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 400 + } + }, + "put-bucket-tags-invalid-value": { + "Error": { + "Code": "InvalidTag", + "Message": "The TagValue you have provided is invalid", + "TagKey": "Key1", + "TagValue": "Val1,Val2" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 400 + } + }, + "put-bucket-tags-aws-prefixed": { + "Error": { + "Code": "InvalidTag", + "Message": "System tags cannot be added/updated by requester", + "TagKey": "aws:prefixed" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 400 + } + }, + "put-object-tags-duplicate-keys": { + "Error": { + "Code": "InvalidTag", + "Message": "Cannot provide multiple Tags with the same key", + 
"TagKey": "Key1" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 400 + } + }, + "put-object-tags-invalid-field": { + "Error": { + "Code": "InvalidTag", + "Message": "The TagKey you have provided is invalid", + "TagKey": "Key1,Key2" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 400 + } + }, + "put-object-tags-aws-prefixed": { + "Error": { + "Code": "InvalidTag", + "Message": "Your TagKey cannot be prefixed with aws:", + "TagKey": "aws:prefixed" + }, + "ResponseMetadata": { + "HTTPHeaders": {}, + "HTTPStatusCode": 400 + } + } + } } }