Skip to content

Commit

Permalink
S3: Improve Lock behaviour (#8036)
Browse files Browse the repository at this point in the history
  • Loading branch information
bblommers authored Aug 26, 2024
1 parent 8a87da8 commit 1056bf4
Show file tree
Hide file tree
Showing 13 changed files with 379 additions and 90 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/tests_real_aws.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,4 +44,4 @@ jobs:
env:
MOTO_TEST_ALLOW_AWS_REQUEST: ${{ true }}
run: |
pytest -sv -n auto tests/test_applicationautoscaling/ tests/test_athena/ tests/test_cloudformation/ tests/test_dynamodb/ tests/test_ec2/ tests/test_events/ tests/test_iam/ tests/test_iot/ tests/test_lakeformation/ tests/test_logs/ tests/test_sqs/ tests/test_ses/ tests/test_s3* tests/test_stepfunctions/ tests/test_sns/ tests/test_timestreamwrite/ -m aws_verified
pytest -sv -n auto --dist loadfile tests/test_applicationautoscaling/ tests/test_athena/ tests/test_cloudformation/ tests/test_dynamodb/ tests/test_ec2/ tests/test_events/ tests/test_iam/ tests/test_iot/ tests/test_lakeformation/ tests/test_logs/ tests/test_sqs/ tests/test_ses/ tests/test_s3* tests/test_stepfunctions/ tests/test_sns/ tests/test_timestreamwrite/ -m aws_verified
10 changes: 10 additions & 0 deletions moto/logs/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -1233,6 +1233,16 @@ def get_query_results(self, query_id: str) -> LogQuery:
"""
return self.queries[query_id]

def cancel_export_task(self, task_id: str) -> None:
    """Cancel the export task identified by ``task_id``.

    Marks the task's status as CANCELLED.

    :raises ResourceNotFoundException: if no export task with ``task_id`` exists.
    """
    task = self.export_tasks.get(task_id)
    if not task:
        raise ResourceNotFoundException("The specified export task does not exist.")
    # If the task has already finished, AWS throws an InvalidOperationException
    # 'The specified export task has already finished'
    # However, the export task is currently synchronous, meaning it finishes immediately
    # When we make the Task async, we can also implement the error behaviour
    task.status = {"code": "CANCELLED", "message": "Cancelled by user"}

def create_export_task(
self,
taskName: str,
Expand Down
5 changes: 5 additions & 0 deletions moto/logs/responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -432,6 +432,11 @@ def get_query_results(self) -> str:
query = self.logs_backend.get_query_results(query_id)
return json.dumps(query.to_result_json())

def cancel_export_task(self) -> str:
    """Handle CancelExportTask: cancel the task named by the ``taskId`` parameter."""
    self.logs_backend.cancel_export_task(self._get_param("taskId"))
    return "{}"

def create_export_task(self) -> str:
task_id = self.logs_backend.create_export_task(
logGroupName=self._get_param("logGroupName"),
Expand Down
14 changes: 14 additions & 0 deletions moto/s3/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -517,6 +517,13 @@ def __init__(self) -> None:
)


class InvalidBucketState(S3ClientError):
    """HTTP 400 error: the bucket is in a state that does not permit the request.

    The caller supplies the specific message, e.g. when Object Lock is applied
    to a bucket without versioning enabled.
    """

    code = 400

    def __init__(self, msg: str):
        super().__init__("InvalidBucketState", msg)


class InvalidObjectState(BucketError):
code = 403

Expand All @@ -537,6 +544,13 @@ def __init__(self) -> None:
super().__init__("InvalidRequest", "Bucket is missing ObjectLockConfiguration")


class MissingRequestBody(S3ClientError):
    """HTTP 400 error raised when a request that requires a body has none."""

    code = 400

    def __init__(self) -> None:
        super().__init__("MissingRequestBodyError", "Request Body is empty")


class AccessDeniedByLock(S3ClientError):
code = 400

Expand Down
33 changes: 23 additions & 10 deletions moto/s3/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@
EntityTooSmall,
HeadOnDeleteMarker,
InvalidBucketName,
InvalidBucketState,
InvalidNotificationDestination,
InvalidNotificationEvent,
InvalidObjectState,
Expand Down Expand Up @@ -371,12 +372,14 @@ def __setstate__(self, state: Dict[str, Any]) -> None:
self.value = state["value"] # type: ignore
self.lock = threading.Lock()

@property
def is_locked(self) -> bool:
def is_locked(self, governance_bypass: bool) -> bool:
if self.lock_legal_status == "ON":
return True

if self.lock_mode == "COMPLIANCE":
if self.lock_mode == "GOVERNANCE" and governance_bypass:
return False

if self.lock_mode in ["GOVERNANCE", "COMPLIANCE"]:
now = utcnow()
try:
until = datetime.datetime.strptime(
Expand Down Expand Up @@ -2393,6 +2396,10 @@ def put_object_lock_configuration(
years: Optional[int] = None,
) -> None:
bucket = self.get_bucket(bucket_name)
if not bucket.is_versioned:
raise InvalidBucketState(
"Versioning must be 'Enabled' on the bucket to apply a Object Lock configuration"
)

if bucket.keys.item_size() > 0:
raise BucketNeedsToBeNew
Expand Down Expand Up @@ -2788,10 +2795,8 @@ def delete_object(

for key in bucket.keys.getlist(key_name):
if str(key.version_id) == str(version_id):
if (
hasattr(key, "is_locked")
and key.is_locked
and not bypass
if isinstance(key, FakeKey) and key.is_locked(
governance_bypass=bypass
):
raise AccessDeniedByLock

Expand Down Expand Up @@ -2833,21 +2838,29 @@ def delete_objects(
bucket_name: str,
objects: List[Dict[str, Any]],
bypass_retention: bool = False,
) -> Tuple[List[Tuple[str, Optional[str]]], List[str]]:
) -> Tuple[List[Tuple[str, Optional[str], Optional[str]]], List[str]]:
deleted = []
errors = []
for object_ in objects:
key_name = object_["Key"]
version_id = object_.get("VersionId", None)

try:
self.delete_object(
success, headers = self.delete_object(
bucket_name,
key_name,
version_id=version_id,
bypass=bypass_retention,
)
deleted.append((key_name, version_id))
deleted.append(
(
key_name,
version_id,
headers.get("version-id")
if headers and not version_id
else None,
)
)
except AccessDeniedByLock:
errors.append(key_name)
return deleted, errors
Expand Down
26 changes: 23 additions & 3 deletions moto/s3/responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union
from urllib.parse import parse_qs, unquote, urlencode, urlparse, urlunparse
from xml.dom import minidom
from xml.parsers.expat import ExpatError

import xmltodict

Expand Down Expand Up @@ -46,6 +47,7 @@
MalformedXML,
MissingBucket,
MissingKey,
MissingRequestBody,
MissingVersion,
NoSystemTags,
NotAnIntegerException,
Expand Down Expand Up @@ -766,7 +768,7 @@ def _bucket_response_put(
self._authenticate_and_authorize_s3_action(bucket_name=bucket_name)

if "object-lock" in querystring:
config = self._lock_config_from_body()
config = self._process_lock_config_from_body()

self.backend.put_object_lock_configuration(
bucket_name,
Expand Down Expand Up @@ -1210,7 +1212,16 @@ def _bucket_response_delete_keys(
deleted, errored = self.backend.delete_objects(
bucket_name, objects_to_delete, bypass_retention
)
errors.extend([(err, "AccessDenied", "Access Denied") for err in errored])
errors.extend(
[
(
err,
"AccessDenied",
"Access Denied because object protected by object lock.",
)
for err in errored
]
)
else:
deleted = []
# [(key_name, errorcode, 'error message'), ..]
Expand Down Expand Up @@ -1916,6 +1927,14 @@ def _key_response_head(
else:
return 404, response_headers, ""

def _process_lock_config_from_body(self) -> Dict[str, Any]:
try:
return self._lock_config_from_body()
except (TypeError, ExpatError):
raise MissingRequestBody
except KeyError:
raise MalformedXML

def _lock_config_from_body(self) -> Dict[str, Any]:
response_dict: Dict[str, Any] = {
"enabled": False,
Expand Down Expand Up @@ -2711,10 +2730,11 @@ def _invalid_headers(self, url: str, headers: Dict[str, str]) -> bool:

S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k, v in deleted %}
{% for k, v, dv in deleted %}
<Deleted>
<Key>{{k}}</Key>
{% if v %}<VersionId>{{v}}</VersionId>{% endif %}
{% if dv %}<DeleteMarkerVersionId>{{ dv }}</DeleteMarkerVersionId><DeleteMarker>true</DeleteMarker>{% endif %}
</Deleted>
{% endfor %}
{% for k,c,m in delete_errors %}
Expand Down
29 changes: 28 additions & 1 deletion tests/test_logs/test_export_tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,14 +118,41 @@ def test_create_export_task_happy_path(logs, s3, log_group_name, bucket_name):
to=to,
destination=bucket_name,
)
task_id = resp["taskId"]
# taskId resembles a valid UUID (i.e. a string of 32 hexadecimal digits)
assert UUID(resp["taskId"])
assert UUID(task_id)
assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200

# s3 bucket contains indication that permissions were successful
resp = s3.get_object(Bucket=bucket_name, Key="aws-logs-write-test")
assert resp["Body"].read() == b"Permission Check Successful"

try:
# ExportTask's can take a long time to succeed
# From the docs: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/S3ExportTasks.html
# > the export task might take anywhere from a few seconds to a few hours
#
# There can be only one ExportTask active at any point in time
# Cancelling this one ensures that there's no Task active
# And subsequent tests can create Export Tasks without running into a LimitExceededException
logs.cancel_export_task(taskId=task_id)
except ClientError as exc:
# Because there are no logs, the export task in AWS usually finishes very quickly
# Which is fine - we can just ignore that
assert (
exc.response["Error"]["Message"]
== "The specified export task has already finished"
)


@pytest.mark.aws_verified
def test_cancel_unknown_export_task(logs):  # pylint: disable=redefined-outer-name
    """Cancelling a nonexistent task id raises ResourceNotFoundException."""
    with pytest.raises(ClientError) as exc_info:
        logs.cancel_export_task(taskId=str(uuid4()))
    error = exc_info.value.response["Error"]
    assert error["Code"] == "ResourceNotFoundException"
    assert error["Message"] == "The specified export task does not exist."


@pytest.mark.aws_verified
def test_create_export_task_raises_ClientError_when_bucket_not_found(
Expand Down
13 changes: 11 additions & 2 deletions tests/test_s3/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from uuid import uuid4

import boto3
from botocore.exceptions import ClientError

from moto import mock_aws
from moto.s3.responses import DEFAULT_REGION_NAME
Expand Down Expand Up @@ -60,15 +61,23 @@ def create_bucket_and_test(bucket_name, **kwargs):


def empty_bucket(client, bucket_name):
    """Delete every object version and delete marker in ``bucket_name``.

    If the bucket has an Object Lock configuration, deletions are issued with
    ``BypassGovernanceRetention`` so governance-locked objects can still be
    removed during test teardown.
    """
    try:
        client.get_object_lock_configuration(Bucket=bucket_name)
        extra_args = {"BypassGovernanceRetention": True}
    except ClientError:
        # No ObjectLock configured on this bucket
        extra_args = {}

    # Delete plain versions first, then any delete markers left behind
    for section in ("Versions", "DeleteMarkers"):
        entries = client.list_object_versions(Bucket=bucket_name).get(section, [])
        for entry in entries:
            client.delete_object(
                Bucket=bucket_name,
                Key=entry["Key"],
                VersionId=entry.get("VersionId"),
                **extra_args,
            )
46 changes: 27 additions & 19 deletions tests/test_s3/test_s3.py
Original file line number Diff line number Diff line change
Expand Up @@ -436,16 +436,16 @@ def test_delete_versioned_objects():
assert delete_markers is None


@mock_aws
def test_delete_missing_key():
@s3_aws_verified
@pytest.mark.aws_verified
def test_delete_missing_key(bucket_name=None):
s3_resource = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
bucket = s3_resource.Bucket("foobar")
bucket.create()
bucket = s3_resource.Bucket(bucket_name)

s3_resource.Object("foobar", "key1").put(Body=b"some value")
s3_resource.Object("foobar", "key2").put(Body=b"some value")
s3_resource.Object("foobar", "key3").put(Body=b"some value")
s3_resource.Object("foobar", "key4").put(Body=b"some value")
s3_resource.Object(bucket_name, "key1").put(Body=b"some value")
s3_resource.Object(bucket_name, "key2").put(Body=b"some value")
s3_resource.Object(bucket_name, "key3").put(Body=b"some value")
s3_resource.Object(bucket_name, "key4").put(Body=b"some value")

result = bucket.delete_objects(
Delete={
Expand Down Expand Up @@ -1786,28 +1786,36 @@ def test_deleted_versionings_list():
assert len(listed["Contents"]) == 1


@mock_aws
def test_delete_objects_for_specific_version_id():
@s3_aws_verified
@pytest.mark.aws_verified
def test_delete_objects_for_specific_version_id(bucket_name=None):
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(Bucket="blah")
client.put_bucket_versioning(
Bucket="blah", VersioningConfiguration={"Status": "Enabled"}
)
enable_versioning(bucket_name, client)

client.put_object(Bucket="blah", Key="test1", Body=b"test1a")
client.put_object(Bucket="blah", Key="test1", Body=b"test1b")
client.put_object(Bucket=bucket_name, Key="test1", Body=b"test1a")
client.put_object(Bucket=bucket_name, Key="test1", Body=b"test1b")

response = client.list_object_versions(Bucket="blah", Prefix="test1")
response = client.list_object_versions(Bucket=bucket_name, Prefix="test1")
id_to_delete = [v["VersionId"] for v in response["Versions"] if v["IsLatest"]][0]

response = client.delete_objects(
Bucket="blah", Delete={"Objects": [{"Key": "test1", "VersionId": id_to_delete}]}
Bucket=bucket_name,
Delete={"Objects": [{"Key": "test1", "VersionId": id_to_delete}]},
)
assert response["Deleted"] == [{"Key": "test1", "VersionId": id_to_delete}]

listed = client.list_objects_v2(Bucket="blah")
listed = client.list_objects_v2(Bucket=bucket_name)
assert len(listed["Contents"]) == 1

# DeleteObjects without specifying VersionId
response = client.delete_objects(
Bucket=bucket_name, Delete={"Objects": [{"Key": "test1"}]}
)
assert "Deleted" in response
assert response["Deleted"][0]["DeleteMarker"] is True
assert response["Deleted"][0]["DeleteMarkerVersionId"]
assert response["Deleted"][0]["Key"] == "test1"


@mock_aws
def test_delete_versioned_bucket():
Expand Down
12 changes: 6 additions & 6 deletions tests/test_s3/test_s3_bucket_policy.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,7 @@ def _put_policy(
@pytest.mark.aws_verified
def test_deny_delete_policy(bucket_name=None):
client = boto3.client("s3", "us-east-1")
resource = boto3.resource("s3", "us-east-1")

policy = {
"Version": "2012-10-17",
Expand All @@ -164,12 +165,11 @@ def test_deny_delete_policy(bucket_name=None):
with pytest.raises(ClientError):
client.delete_object(Bucket=bucket_name, Key="obj")

result = (
boto3.resource("s3", "us-east-1").Bucket(bucket_name).objects.all().delete()
)
assert result[0]["Errors"] == [
{"Key": "obj", "Code": "AccessDenied", "Message": "Access Denied"}
]
result = resource.Bucket(bucket_name).objects.all().delete()
assert result[0]["Errors"][0]["Key"] == "obj"
assert result[0]["Errors"][0]["Code"] == "AccessDenied"
# Message:
# User: {user_arn} is not authorized to perform: s3:DeleteObject on resource: "arn:aws:s3:::{bucket}/{key}" with an explicit deny in a resource-based policy

# Delete Policy to make sure bucket can be emptied during teardown
client.delete_bucket_policy(Bucket=bucket_name)
Loading

0 comments on commit 1056bf4

Please sign in to comment.