Skip to content

Commit bed9a21

Browse files
committed
code cleanup according to review notes
1 parent 9d6021c commit bed9a21

File tree

2 files changed

+11
-39
lines changed

2 files changed

+11
-39
lines changed

tests/integration/awslambda/functions/lambda_triggered_by_sqs_download_s3_file.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@ def handler(event, context):
1111
protocol = "https" if os.environ.get("USE_SSL") else "http"
1212
endpoint_url = "{}://{}:{}".format(protocol, os.environ["LOCALSTACK_HOSTNAME"], EDGE_PORT)
1313
s3 = boto3.client("s3", endpoint_url=endpoint_url, verify=False)
14-
# print(f"{os.environ['BUCKET_NAME']}")
1514
s3.download_file(
1615
os.environ["BUCKET_NAME"],
1716
os.environ["OBJECT_NAME"],

tests/integration/s3/test_s3.py

Lines changed: 11 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -316,23 +316,11 @@ def test_get_bucket_notification_configuration_no_such_bucket(self, s3_client, s
316316

317317
snapshot.match("expected_error", e.value.response)
318318

319-
@pytest.mark.aws_validated
320-
@pytest.mark.xfail(
321-
reason="currently not implemented in moto, see https://github.com/localstack/localstack/issues/6422"
322-
)
323-
def test_get_object_attributes_object_size(self, s3_client, s3_bucket):
324-
s3_client.put_object(Bucket=s3_bucket, Key="data.txt", Body=b"69\n420\n")
325-
response = s3_client.get_object_attributes(
326-
Bucket=s3_bucket, Key="data.txt", ObjectAttributes=["ObjectSize"]
327-
)
328-
assert response["ObjectSize"] == 7
329-
330319
@pytest.mark.aws_validated
331320
@pytest.mark.xfail(
332321
reason="currently not implemented in moto, see https://github.com/localstack/localstack/issues/6217"
333322
)
334-
# see also https://github.com/localstack/localstack/issues/6422
335-
# todo: see XML issue?
323+
# TODO: see also XML issue in https://github.com/localstack/localstack/issues/6422
336324
def test_get_object_attributes(self, s3_client, s3_bucket, snapshot):
337325
s3_client.put_object(Bucket=s3_bucket, Key="data.txt", Body=b"69\n420\n")
338326
response = s3_client.get_object_attributes(
@@ -379,7 +367,7 @@ def test_range_key_not_exists(self, s3_client, s3_bucket):
379367

380368
@pytest.mark.aws_validated
381369
def test_create_bucket_via_host_name(self, s3_vhost_client):
382-
# todo check redirection (happens in AWS because of region name), should it happen in LS?
370+
# TODO check redirection (happens in AWS because of region name), should it happen in LS?
383371
# https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#VirtualHostingBackwardsCompatibility
384372
bucket_name = f"test-{short_uid()}"
385373
try:
@@ -666,7 +654,7 @@ def test_s3_object_expiry(self, s3_client, s3_bucket, snapshot):
666654
# https://stackoverflow.com/questions/38851456/aws-s3-object-expiration-less-than-24-hours
667655
# handle s3 object expiry
668656
# https://github.com/localstack/localstack/issues/1685
669-
# todo: should we have a config var to not deleted immediately in the new provider? and schedule it?
657+
# TODO: should we have a config var to not delete immediately in the new provider? and schedule it?
670658
snapshot.add_transformer(snapshot.transform.s3_api())
671659
# put object
672660
short_expire = datetime.datetime.now(timezone("GMT")) + datetime.timedelta(seconds=1)
@@ -679,6 +667,7 @@ def test_s3_object_expiry(self, s3_client, s3_bucket, snapshot):
679667
Body="foo",
680668
Expires=short_expire,
681669
)
670+
# sleep so it expires
682671
time.sleep(3)
683672
# head_object does not raise an error for now in LS
684673
response = s3_client.head_object(Bucket=s3_bucket, Key=object_key_expired)
@@ -896,7 +885,7 @@ def test_delete_lifecycle_configuration_on_bucket_deletion(
896885
paths=[
897886
"$..ContentLanguage",
898887
"$..VersionId",
899-
"$..ETag", # todo ETag should be the same?
888+
"$..ETag", # TODO ETag should be the same?
900889
]
901890
)
902891
def test_range_header_body_length(self, s3_client, s3_bucket, snapshot):
@@ -1213,7 +1202,7 @@ def test_s3_upload_download_gzip(self, s3_client, s3_bucket, snapshot):
12131202
Body=upload_file_object.getvalue(),
12141203
)
12151204
snapshot.match("put-object", response)
1216-
# todo: check why ETag is different
1205+
# TODO: check why ETag is different
12171206

12181207
# Download gzip
12191208
downloaded_object = s3_client.get_object(Bucket=s3_bucket, Key="test.gz")
@@ -1267,7 +1256,7 @@ def test_set_external_hostname(
12671256
response = s3_multipart_upload(bucket=s3_bucket, key=key, data=content, acl=acl)
12681257
snapshot.match("multipart-upload", response)
12691258

1270-
if is_aws_cloud(): # todo: default addressing is vhost for AWS
1259+
if is_aws_cloud(): # TODO: default addressing is vhost for AWS
12711260
expected_url = f"{_bucket_url_vhost(bucket_name=s3_bucket)}/{key}"
12721261
else: # LS default is path addressing
12731262
expected_url = f"{_bucket_url(bucket_name=s3_bucket, localstack_host=config.HOSTNAME_EXTERNAL)}/{key}"
@@ -1648,7 +1637,7 @@ def test_s3_presigned_post_success_action_status_201_response(self, s3_client, s
16481637
# see localstack.services.s3.s3_listener.ProxyListenerS3.get_201_response
16491638
if is_aws_cloud():
16501639
location = f"{_bucket_url_vhost(s3_bucket, aws_stack.get_region())}/key-my-file"
1651-
etag = '"43281e21fce675ac3bcb3524b38ca4ed"' # todo check quoting of etag
1640+
etag = '"43281e21fce675ac3bcb3524b38ca4ed"' # TODO check quoting of etag
16521641
else:
16531642
location = "http://localhost/key-my-file"
16541643
etag = "d41d8cd98f00b204e9800998ecf8427f"
@@ -1915,15 +1904,11 @@ def test_s3_batch_delete_public_objects_using_requests(
19151904
assert 200 == r.status_code
19161905
response = xmltodict.parse(r.content)
19171906
response["DeleteResult"].pop("@xmlns")
1918-
# assert response["DeleteResult"]["Error"]["Key"] == object_key_1
1919-
# assert response["DeleteResult"]["Error"]["Code"] == "AccessDenied"
1920-
# assert response["DeleteResult"]["Deleted"]["Key"] == object_key_2
1907+
19211908
snapshot.match("multi-delete-with-requests", response)
19221909

19231910
response = s3_client.list_objects(Bucket=bucket_name)
19241911
snapshot.match("list-remaining-objects", response)
1925-
# assert 200 == response["ResponseMetadata"]["HTTPStatusCode"]
1926-
# assert len(response["Contents"]) == 1
19271912

19281913
@pytest.mark.aws_validated
19291914
@pytest.mark.skip_snapshot_verify(
@@ -2479,9 +2464,7 @@ def test_cors_configurations(self, s3_client, s3_create_bucket, monkeypatch, sna
24792464
url, headers={"Origin": config.get_edge_url(), "Content-Type": "text/html"}
24802465
)
24812466
assert 200 == response.status_code
2482-
# TODO this is not contained in AWS but was asserted for LocalStack
2483-
# assert "Access-Control-Allow-Headers" in response.headers
2484-
# assert "x-amz-tagging" == response.headers["Access-Control-Allow-Headers"]
2467+
24852468
snapshot.match("raw-response-headers", dict(response.headers))
24862469

24872470
BUCKET_CORS_CONFIG = {
@@ -2729,16 +2712,6 @@ def test_presigned_url_signature_authentication(
27292712
response = requests.delete(presigned_delete_url)
27302713
assert 204 == response.status_code
27312714

2732-
# TODO this does not work on AWS (not even if the first delete was not executed)
2733-
# presigned_delete_url_2 = _generate_presigned_url(
2734-
# client,
2735-
# {"Bucket": bucket_name, "Key": object_key, "VersionId": "1"},
2736-
# expires,
2737-
# client_method="delete_object",
2738-
# )
2739-
# response = requests.delete(presigned_delete_url_2)
2740-
# assert 204 == response.status_code
2741-
27422715

27432716
class TestS3DeepArchive:
27442717
"""
@@ -2835,7 +2808,7 @@ def _bucket_url_vhost(bucket_name: str, region: str = "", localstack_host: str =
28352808
return f"https://{bucket_name}.s3.{region}.amazonaws.com"
28362809
host = localstack_host or S3_VIRTUAL_HOSTNAME
28372810
s3_edge_url = config.get_edge_url(localstack_hostname=host)
2838-
# todo might add the region here
2811+
# TODO might add the region here
28392812
return s3_edge_url.replace(f"://{host}", f"://{bucket_name}.{host}")
28402813

28412814

0 commit comments

Comments (0)