diff --git a/.circleci/config.yml b/.circleci/config.yml
index ee7c5833fb2b9..856200e4ea3cb 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -147,7 +147,7 @@ jobs:
name: Test ASF Lambda provider
environment:
PROVIDER_OVERRIDE_LAMBDA: "asf"
- TEST_PATH: "tests/integration/awslambda/test_lambda.py tests/integration/awslambda/test_lambda_api.py tests/integration/awslambda/test_lambda_common.py tests/integration/awslambda/test_lambda_integration_sqs.py tests/integration/cloudformation/resources/test_lambda.py tests/integration/awslambda/test_lambda_integration_dynamodbstreams.py tests/integration/awslambda/test_lambda_integration_kinesis.py tests/integration/awslambda/test_lambda_developer_tools.py"
+ TEST_PATH: "tests/integration/awslambda/test_lambda.py tests/integration/awslambda/test_lambda_api.py tests/integration/awslambda/test_lambda_common.py tests/integration/awslambda/test_lambda_integration_sqs.py tests/integration/cloudformation/resources/test_lambda.py tests/integration/awslambda/test_lambda_integration_dynamodbstreams.py tests/integration/awslambda/test_lambda_integration_kinesis.py tests/integration/awslambda/test_lambda_developer_tools.py tests/integration/test_network_configuration.py::TestLambda"
PYTEST_ARGS: "--reruns 3 --junitxml=target/reports/lambda_asf.xml -o junit_suite_name='lambda_asf'"
COVERAGE_ARGS: "-p"
command: make test-coverage
@@ -172,7 +172,7 @@ jobs:
name: Test ASF S3 provider
environment:
PROVIDER_OVERRIDE_S3: "asf"
- TEST_PATH: "tests/integration/s3/"
+ TEST_PATH: "tests/integration/s3/ tests/integration/test_network_configuration.py::TestS3"
PYTEST_ARGS: "--reruns 3 --junitxml=target/reports/s3_asf.xml -o junit_suite_name='s3_asf'"
COVERAGE_ARGS: "-p"
command: make test-coverage
diff --git a/.github/workflows/pro-integration.yml b/.github/workflows/pro-integration.yml
index e44bc7ee5c33a..7406eea1f9ce3 100644
--- a/.github/workflows/pro-integration.yml
+++ b/.github/workflows/pro-integration.yml
@@ -42,7 +42,7 @@ concurrency:
jobs:
run-integration-tests:
runs-on: ubuntu-latest
- timeout-minutes: 110
+ timeout-minutes: 120
defaults:
run:
working-directory: localstack-ext
diff --git a/localstack/services/awslambda/lambda_api.py b/localstack/services/awslambda/lambda_api.py
index 5a9498792670f..a1091e4e0e16f 100644
--- a/localstack/services/awslambda/lambda_api.py
+++ b/localstack/services/awslambda/lambda_api.py
@@ -25,7 +25,7 @@
from localstack import config, constants
from localstack.aws.accounts import get_aws_account_id
-from localstack.constants import APPLICATION_JSON, LOCALHOST_HOSTNAME
+from localstack.constants import APPLICATION_JSON
from localstack.http import Request
from localstack.http import Response as HttpResponse
from localstack.services.awslambda import lambda_executors
@@ -81,6 +81,7 @@
now_utc,
timestamp,
)
+from localstack.utils.urls import localstack_host
LOG = logging.getLogger(__name__)
@@ -1511,7 +1512,10 @@ def create_url_config(function):
custom_id = md5(str(random()))
region_name = aws_stack.get_region()
- url = f"http://{custom_id}.lambda-url.{region_name}.{LOCALHOST_HOSTNAME}:{config.EDGE_PORT_HTTP or config.EDGE_PORT}/"
+ host_definition = localstack_host(
+ use_localhost_cloud=True, custom_port=config.EDGE_PORT_HTTP or config.EDGE_PORT
+ )
+ url = f"http://{custom_id}.lambda-url.{region_name}.{host_definition.host_and_port()}/"
# TODO: HTTPS support
data = json.loads(to_str(request.data))
diff --git a/localstack/services/awslambda/provider.py b/localstack/services/awslambda/provider.py
index 2eb85b1ecfaba..c3eded17c0734 100644
--- a/localstack/services/awslambda/provider.py
+++ b/localstack/services/awslambda/provider.py
@@ -134,7 +134,6 @@
UpdateFunctionUrlConfigResponse,
Version,
)
-from localstack.constants import LOCALHOST_HOSTNAME
from localstack.services.awslambda import api_utils
from localstack.services.awslambda import hooks as lambda_hooks
from localstack.services.awslambda.api_utils import STATEMENT_ID_REGEX
@@ -193,6 +192,7 @@
from localstack.utils.files import load_file
from localstack.utils.strings import get_random_hex, long_uid, short_uid, to_bytes, to_str
from localstack.utils.sync import poll_condition
+from localstack.utils.urls import localstack_host
LOG = logging.getLogger(__name__)
@@ -1632,12 +1632,16 @@ def create_function_url_config(
# create function URL config
url_id = api_utils.generate_random_url_id()
+
+ host_definition = localstack_host(
+ use_localhost_cloud=True, custom_port=config.EDGE_PORT_HTTP or config.EDGE_PORT
+ )
fn.function_url_configs[normalized_qualifier] = FunctionUrlConfig(
function_arn=function_arn,
function_name=function_name,
cors=cors,
url_id=url_id,
- url=f"http://{url_id}.lambda-url.{context.region}.{LOCALHOST_HOSTNAME}:{config.EDGE_PORT_HTTP or config.EDGE_PORT}/", # TODO: https support
+ url=f"http://{url_id}.lambda-url.{context.region}.{host_definition.host_and_port()}/", # TODO: https support
auth_type=auth_type,
creation_time=api_utils.generate_lambda_date(),
last_modified_time=api_utils.generate_lambda_date(),
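
Both lambda_api.py (old provider) and provider.py (new provider) now derive the function URL host from localstack_host(use_localhost_cloud=True). A rough sketch of the resulting URL shape, with purely illustrative url_id, region and port values:

# Hypothetical values for illustration only.
url_id = "abcdefgh"
region = "us-east-1"
host_and_port = "localhost.localstack.cloud:4566"  # localstack_host(use_localhost_cloud=True).host_and_port()
url = f"http://{url_id}.lambda-url.{region}.{host_and_port}/"  # HTTPS support is still a TODO
print(url)  # http://abcdefgh.lambda-url.us-east-1.localhost.localstack.cloud:4566/
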
diff --git a/localstack/services/opensearch/cluster_manager.py b/localstack/services/opensearch/cluster_manager.py
index 19630d3b78cfe..f018f6c918106 100644
--- a/localstack/services/opensearch/cluster_manager.py
+++ b/localstack/services/opensearch/cluster_manager.py
@@ -8,7 +8,7 @@
from localstack import config
from localstack.aws.api.opensearch import DomainEndpointOptions, EngineType
from localstack.config import EDGE_BIND_HOST
-from localstack.constants import LOCALHOST, LOCALHOST_HOSTNAME
+from localstack.constants import LOCALHOST
from localstack.services.opensearch import versions
from localstack.services.opensearch.cluster import (
CustomEndpoint,
@@ -28,6 +28,7 @@
start_thread,
)
from localstack.utils.serving import Server
+from localstack.utils.urls import localstack_host
LOG = logging.getLogger(__name__)
@@ -115,11 +116,16 @@ def build_cluster_endpoint(
assigned_port = external_service_ports.reserve_port()
else:
assigned_port = external_service_ports.reserve_port()
- return f"{config.LOCALSTACK_HOSTNAME}:{assigned_port}"
+
+ host_definition = localstack_host(use_localstack_hostname=True, custom_port=assigned_port)
+ return host_definition.host_and_port()
if config.OPENSEARCH_ENDPOINT_STRATEGY == "path":
- return f"{config.LOCALSTACK_HOSTNAME}:{config.EDGE_PORT}/{engine_domain}/{domain_key.region}/{domain_key.domain_name}"
+ host_definition = localstack_host(use_localstack_hostname=True)
+ return f"{host_definition.host_and_port()}/{engine_domain}/{domain_key.region}/{domain_key.domain_name}"
+
# or through a subdomain (domain-name.region.opensearch.localhost.localstack.cloud)
- return f"{domain_key.domain_name}.{domain_key.region}.{engine_domain}.{LOCALHOST_HOSTNAME}:{config.EDGE_PORT}"
+ host_definition = localstack_host(use_localhost_cloud=True)
+ return f"{domain_key.domain_name}.{domain_key.region}.{engine_domain}.{host_definition.host_and_port()}"
def determine_custom_endpoint(
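
All three OPENSEARCH_ENDPOINT_STRATEGY branches now go through localstack_host(). A sketch of the endpoint shape each strategy produces, with illustrative domain, region and port values:

# Illustrative values only; the real code reserves ports and reads config.
domain_name, region, engine_domain = "my-domain", "us-east-1", "opensearch"

# "port" strategy: LOCALSTACK_HOSTNAME plus a dynamically reserved port
port_endpoint = "localhost:4510"

# "path" strategy: LOCALSTACK_HOSTNAME on the edge port, domain encoded in the path
path_endpoint = f"localhost:4566/{engine_domain}/{region}/{domain_name}"

# default "domain" strategy: subdomain of localhost.localstack.cloud
domain_endpoint = f"{domain_name}.{region}.{engine_domain}.localhost.localstack.cloud:4566"
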
diff --git a/localstack/services/s3/provider.py b/localstack/services/s3/provider.py
index 2d86149824b6b..845436ad796f0 100644
--- a/localstack/services/s3/provider.py
+++ b/localstack/services/s3/provider.py
@@ -94,7 +94,6 @@
preprocess_request,
serve_custom_service_request_handlers,
)
-from localstack.constants import LOCALHOST_HOSTNAME
from localstack.services.edge import ROUTER
from localstack.services.moto import call_moto
from localstack.services.plugins import ServiceLifecycleHook
@@ -130,6 +129,7 @@
from localstack.utils.collections import get_safe
from localstack.utils.patch import patch
from localstack.utils.strings import short_uid
+from localstack.utils.urls import localstack_host
LOG = logging.getLogger(__name__)
@@ -166,8 +166,13 @@ def __init__(self, message=None):
def get_full_default_bucket_location(bucket_name):
if config.HOSTNAME_EXTERNAL != config.LOCALHOST:
- return f"{config.get_protocol()}://{config.HOSTNAME_EXTERNAL}:{config.get_edge_port_http()}/{bucket_name}/"
- return f"{config.get_protocol()}://{bucket_name}.s3.{LOCALHOST_HOSTNAME}:{config.get_edge_port_http()}/"
+ host_definition = localstack_host(
+ use_hostname_external=True, custom_port=config.get_edge_port_http()
+ )
+ return f"{config.get_protocol()}://{host_definition.host_and_port()}/{bucket_name}/"
+ else:
+ host_definition = localstack_host(use_localhost_cloud=True)
+ return f"{config.get_protocol()}://{bucket_name}.s3.{host_definition.host_and_port()}/"
class S3Provider(S3Api, ServiceLifecycleHook):
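
A sketch of the two bucket-location shapes get_full_default_bucket_location can now return; the hostnames and port are illustrative:

bucket_name = "my-bucket"  # illustrative
protocol = "http"          # config.get_protocol() in the real code

# HOSTNAME_EXTERNAL customised -> path-style URL on the external host
external_style = f"{protocol}://my-external-host:4566/{bucket_name}/"

# default -> virtual-hosted-style URL under localhost.localstack.cloud
default_style = f"{protocol}://{bucket_name}.s3.localhost.localstack.cloud:4566/"
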
diff --git a/localstack/services/s3/s3_listener.py b/localstack/services/s3/s3_listener.py
index 17f2cda76e220..17deada128585 100644
--- a/localstack/services/s3/s3_listener.py
+++ b/localstack/services/s3/s3_listener.py
@@ -64,6 +64,7 @@
to_str,
)
from localstack.utils.time import timestamp_millis
+from localstack.utils.urls import localstack_host
from localstack.utils.xml import strip_xmlns
# backend port (configured in s3_starter.py on startup)
@@ -1346,6 +1347,7 @@ def is_multipart_upload(query):
@staticmethod
def get_201_response(key, bucket_name):
+ host_definition = localstack_host(use_hostname_external=True)
return """
{protocol}://{host}/{encoded_key}
@@ -1355,7 +1357,7 @@ def get_201_response(key, bucket_name):
""".format(
protocol=get_service_protocol(),
- host=config.HOSTNAME_EXTERNAL,
+ host=host_definition.host,
encoded_key=quote(key, safe=""),
key=key,
bucket=bucket_name,
@@ -1366,12 +1368,13 @@ def get_201_response(key, bucket_name):
def _update_location(content, bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
- host = config.HOSTNAME_EXTERNAL
- if ":" not in host:
- host = f"{host}:{config.service_port('s3')}"
+ host_definition = localstack_host(
+ use_hostname_external=True, custom_port=config.get_edge_port_http()
+ )
return re.sub(
r"\s*([a-zA-Z0-9\-]+)://[^/]+/([^<]+)\s*",
- r"%s://%s/%s/\2" % (get_service_protocol(), host, bucket_name),
+ r"%s://%s/%s/\2"
+ % (get_service_protocol(), host_definition.host_and_port(), bucket_name),
content,
flags=re.MULTILINE,
)
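
_update_location still rewrites the host in the returned Location via the same regex; only the source of the host changes. A small self-contained sketch of what the substitution does (the input body and hostname are hypothetical):

import re

protocol, host_and_port, bucket_name = "https", "my-external-host:4566", "my-bucket"
content = "<Location>https://s3.amazonaws.com/key.txt</Location>"  # hypothetical backend response
print(
    re.sub(
        r"\s*([a-zA-Z0-9\-]+)://[^/]+/([^<]+)\s*",
        r"%s://%s/%s/\2" % (protocol, host_and_port, bucket_name),
        content,
        flags=re.MULTILINE,
    )
)
# -> <Location>https://my-external-host:4566/my-bucket/key.txt</Location>
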
diff --git a/localstack/services/sqs/models.py b/localstack/services/sqs/models.py
index 03f542550e04e..c812c6bb53d34 100644
--- a/localstack/services/sqs/models.py
+++ b/localstack/services/sqs/models.py
@@ -9,7 +9,7 @@
from queue import PriorityQueue
from typing import Dict, NamedTuple, Optional, Set
-from localstack import config, constants
+from localstack import config
from localstack.aws.api import RequestContext
from localstack.aws.api.sqs import (
InvalidAttributeName,
@@ -21,7 +21,7 @@
ReceiptHandleIsInvalid,
TagMap,
)
-from localstack.config import external_service_url
+from localstack.config import get_protocol
from localstack.services.sqs import constants as sqs_constants
from localstack.services.sqs.exceptions import (
InvalidAttributeValue,
@@ -35,6 +35,7 @@
)
from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute
from localstack.utils.time import now
+from localstack.utils.urls import localstack_host
LOG = logging.getLogger(__name__)
@@ -249,13 +250,18 @@ def url(self, context: RequestContext) -> str:
# or us-east-2.queue.localhost.localstack.cloud:4566/000000000000/my-queue
region = "" if self.region == "us-east-1" else self.region + "."
scheme = context.request.scheme
- host_url = f"{scheme}://{region}queue.{constants.LOCALHOST_HOSTNAME}:{config.EDGE_PORT}"
+
+ host_definition = localstack_host(use_localhost_cloud=True)
+ host_url = f"{scheme}://{region}queue.{host_definition.host_and_port()}"
elif config.SQS_ENDPOINT_STRATEGY == "path":
# https?://localhost:4566/queue/us-east-1/00000000000/my-queue (us-east-1)
host_url = f"{context.request.host_url}queue/{self.region}"
else:
if config.SQS_PORT_EXTERNAL:
- host_url = external_service_url("sqs")
+ host_definition = localstack_host(
+ use_hostname_external=True, custom_port=config.SQS_PORT_EXTERNAL
+ )
+ host_url = f"{get_protocol()}://{host_definition.host_and_port()}"
return "{host}/{account_id}/{name}".format(
host=host_url.rstrip("/"),
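
The queue URL now varies with SQS_ENDPOINT_STRATEGY and SQS_PORT_EXTERNAL. A sketch of the shapes the url() property can produce; account id, region, hostnames and ports are illustrative:

account_id, region, queue_name = "000000000000", "us-east-2", "my-queue"  # illustrative

# "domain" strategy: region-prefixed subdomain of localhost.localstack.cloud (us-east-1 omits the prefix)
domain_url = f"http://{region}.queue.localhost.localstack.cloud:4566/{account_id}/{queue_name}"

# "path" strategy: the request's host plus /queue/<region>
path_url = f"http://localhost:4566/queue/{region}/{account_id}/{queue_name}"

# "off" strategy with SQS_PORT_EXTERNAL set: HOSTNAME_EXTERNAL plus the external port
external_url = f"http://my-external-host:12345/{account_id}/{queue_name}"
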
diff --git a/localstack/testing/pytest/fixtures.py b/localstack/testing/pytest/fixtures.py
index 79ce964857c62..5973cad726c86 100644
--- a/localstack/testing/pytest/fixtures.py
+++ b/localstack/testing/pytest/fixtures.py
@@ -4,6 +4,7 @@
import logging
import os
import re
+import socket
import time
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
@@ -21,7 +22,7 @@
from pytest_httpserver import HTTPServer
from werkzeug import Request, Response
-from localstack import config
+from localstack import config, constants
from localstack.aws.accounts import get_aws_account_id
from localstack.constants import TEST_AWS_ACCESS_KEY_ID, TEST_AWS_SECRET_ACCESS_KEY
from localstack.services.stores import (
@@ -2006,6 +2007,69 @@ def factory(**kwargs):
LOG.debug(f"Error cleaning up AppSync API: {api}, {e}")
+@pytest.fixture
+def assert_host_customisation(monkeypatch):
+ hostname_external = f"external-host-{short_uid()}"
+ # `LOCALSTACK_HOSTNAME` is really an internal variable that has been
+ # exposed to the user at some point in the past. It is used by some
+ # services that start resources (e.g. OpenSearch) to determine if the
+ # service has been started correctly (i.e. a health check). This means that
+ # the value must be resolvable by LocalStack or else the service resources
+ # won't start properly.
+ #
+ # One hostname that's always resolvable is the hostname of the process
+ # running LocalStack, so use that here.
+ #
+ # Note: We cannot use `localhost` since we explicitly check that the URL
+ # passed in does not contain `localhost`, unless it is required to.
+ localstack_hostname = socket.gethostname()
+ monkeypatch.setattr(config, "HOSTNAME_EXTERNAL", hostname_external)
+ monkeypatch.setattr(config, "LOCALSTACK_HOSTNAME", localstack_hostname)
+
+ def asserter(
+ url: str,
+ *,
+ use_hostname_external: bool = False,
+ use_localstack_hostname: bool = False,
+ use_localstack_cloud: bool = False,
+ use_localhost: bool = False,
+ custom_host: Optional[str] = None,
+ ):
+ if use_hostname_external:
+ assert hostname_external in url
+
+ assert localstack_hostname not in url
+ assert constants.LOCALHOST_HOSTNAME not in url
+ assert constants.LOCALHOST not in url
+ elif use_localstack_hostname:
+ assert localstack_hostname in url
+
+ assert hostname_external not in url
+ assert constants.LOCALHOST_HOSTNAME not in url
+ assert constants.LOCALHOST not in url
+ elif use_localstack_cloud:
+ assert constants.LOCALHOST_HOSTNAME in url
+
+ assert hostname_external not in url
+ assert localstack_hostname not in url
+ elif use_localhost:
+ assert constants.LOCALHOST in url
+
+ assert constants.LOCALHOST_HOSTNAME not in url
+ assert hostname_external not in url
+ assert localstack_hostname not in url
+ elif custom_host is not None:
+ assert custom_host in url
+
+ assert constants.LOCALHOST_HOSTNAME not in url
+ assert hostname_external not in url
+ assert localstack_hostname not in url
+ else:
+ raise ValueError("no assertions made")
+
+ yield asserter
+
+
@pytest.fixture
def echo_http_server(httpserver: HTTPServer):
"""Spins up a local HTTP echo server and returns the endpoint URL"""
diff --git a/localstack/utils/urls.py b/localstack/utils/urls.py
index 2d7b58eb3c0cd..04f605a553fd4 100644
--- a/localstack/utils/urls.py
+++ b/localstack/utils/urls.py
@@ -1,6 +1,47 @@
+from dataclasses import dataclass
+from typing import Optional
+
+from localstack import config, constants
+
+
def path_from_url(url: str) -> str:
return f'/{url.partition("://")[2].partition("/")[2]}' if "://" in url else url
def hostname_from_url(url: str) -> str:
return url.split("://")[-1].split("/")[0].split(":")[0]
+
+
+@dataclass
+class HostDefinition:
+ host: str
+ port: int
+
+ def host_and_port(self):
+ return f"{self.host}:{self.port}"
+
+
+def localstack_host(
+ use_hostname_external: bool = False,
+ use_localstack_hostname: bool = False,
+ use_localhost_cloud: bool = False,
+ custom_port: Optional[int] = None,
+) -> HostDefinition:
+ """
+ Determine the host and port to return to the user based on:
+ - the user's configuration (e.g. environment variable overrides)
+ - the defaults of the system
+ """
+ port = config.EDGE_PORT
+ if custom_port is not None:
+ port = custom_port
+
+ host = config.LOCALHOST
+ if use_hostname_external:
+ host = config.HOSTNAME_EXTERNAL
+ elif use_localstack_hostname:
+ host = config.LOCALSTACK_HOSTNAME
+ elif use_localhost_cloud:
+ host = constants.LOCALHOST_HOSTNAME
+
+ return HostDefinition(host=host, port=port)
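
For illustration, a minimal sketch of how the new helper is meant to be called; the hosts and ports in the comments assume default settings and are illustrative, not guaranteed:

from localstack import config
from localstack.utils.urls import localstack_host

# Default: config.LOCALHOST on config.EDGE_PORT, e.g. "localhost:4566".
print(localstack_host().host_and_port())

# HOSTNAME_EXTERNAL with an explicit port override, as in the S3 provider change above.
print(
    localstack_host(
        use_hostname_external=True, custom_port=config.get_edge_port_http()
    ).host_and_port()
)

# localhost.localstack.cloud, as used for subdomain-style endpoints (Lambda URLs, SQS, OpenSearch).
print(localstack_host(use_localhost_cloud=True).host_and_port())
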
diff --git a/tests/integration/test_network_configuration.py b/tests/integration/test_network_configuration.py
new file mode 100644
index 0000000000000..62a291630727b
--- /dev/null
+++ b/tests/integration/test_network_configuration.py
@@ -0,0 +1,233 @@
+"""
+This test file captures the _current_ state of returning URLs before making
+sweeping changes. This is to ensure that the refactoring does not break
+externally visible behaviour. In the future we can update this test suite to
+correspond to the behaviour we want, and we get a todo list of things to
+change 😂
+"""
+import json
+
+import pytest
+import requests
+import xmltodict
+from botocore.auth import SigV4Auth
+
+from localstack import config
+from localstack.aws.api.lambda_ import Runtime
+from localstack.testing.aws.lambda_utils import is_new_provider, is_old_provider
+from localstack.utils.files import new_tmp_file, save_file
+from localstack.utils.strings import short_uid
+
+pytestmark = [pytest.mark.only_localstack]
+
+
+class TestOpenSearch:
+ """
+ OpenSearch does not respect any customisations and always returns a domain containing localhost.localstack.cloud.
+ """
+
+ def test_default_strategy(
+ self, opensearch_client, opensearch_wait_for_cluster, assert_host_customisation
+ ):
+ domain_name = f"domain-{short_uid()}"
+ res = opensearch_client.create_domain(DomainName=domain_name)
+ opensearch_wait_for_cluster(domain_name)
+ endpoint = res["DomainStatus"]["Endpoint"]
+
+ assert_host_customisation(endpoint, use_localstack_cloud=True)
+
+ def test_port_strategy(
+ self, monkeypatch, opensearch_client, opensearch_wait_for_cluster, assert_host_customisation
+ ):
+ monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "port")
+
+ domain_name = f"domain-{short_uid()}"
+ res = opensearch_client.create_domain(DomainName=domain_name)
+ opensearch_wait_for_cluster(domain_name)
+ endpoint = res["DomainStatus"]["Endpoint"]
+
+ if config.is_in_docker:
+ assert_host_customisation(endpoint, use_localhost=True)
+ else:
+ assert_host_customisation(endpoint, custom_host="127.0.0.1")
+
+ def test_path_strategy(
+ self, monkeypatch, opensearch_client, opensearch_wait_for_cluster, assert_host_customisation
+ ):
+ monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "path")
+
+ domain_name = f"domain-{short_uid()}"
+ res = opensearch_client.create_domain(DomainName=domain_name)
+ opensearch_wait_for_cluster(domain_name)
+ endpoint = res["DomainStatus"]["Endpoint"]
+
+ assert_host_customisation(endpoint, use_localstack_hostname=True)
+
+
+class TestS3:
+ @pytest.mark.skipif(
+ condition=config.LEGACY_S3_PROVIDER, reason="Not implemented for legacy provider"
+ )
+ def test_non_us_east_1_location(
+ self, s3_resource, s3_client, cleanups, assert_host_customisation
+ ):
+ bucket_name = f"bucket-{short_uid()}"
+ res = s3_client.create_bucket(
+ Bucket=bucket_name,
+ CreateBucketConfiguration={
+ "LocationConstraint": "eu-west-1",
+ },
+ )
+
+ def cleanup():
+ bucket = s3_resource.Bucket(bucket_name)
+ bucket.objects.all().delete()
+ bucket.object_versions.all().delete()
+ bucket.delete()
+
+ cleanups.append(cleanup)
+
+ assert_host_customisation(res["Location"], use_hostname_external=True)
+
+ def test_multipart_upload(self, s3_bucket, s3_client, assert_host_customisation):
+ key_name = f"key-{short_uid()}"
+ upload_id = s3_client.create_multipart_upload(Bucket=s3_bucket, Key=key_name)["UploadId"]
+ part_etag = s3_client.upload_part(
+ Bucket=s3_bucket, Key=key_name, Body=b"bytes", PartNumber=1, UploadId=upload_id
+ )["ETag"]
+ res = s3_client.complete_multipart_upload(
+ Bucket=s3_bucket,
+ Key=key_name,
+ MultipartUpload={"Parts": [{"ETag": part_etag, "PartNumber": 1}]},
+ UploadId=upload_id,
+ )
+
+ assert_host_customisation(res["Location"], use_hostname_external=True)
+
+ def test_201_response(self, s3_bucket, s3_client, assert_host_customisation):
+ key_name = f"key-{short_uid()}"
+ body = "body"
+ presigned_request = s3_client.generate_presigned_post(
+ Bucket=s3_bucket,
+ Key=key_name,
+ Fields={"success_action_status": "201"},
+ Conditions=[{"bucket": s3_bucket}, ["eq", "$success_action_status", "201"]],
+ )
+ files = {"file": ("my-file", body)}
+ res = requests.post(
+ presigned_request["url"],
+ data=presigned_request["fields"],
+ files=files,
+ verify=False,
+ )
+ res.raise_for_status()
+ json_response = xmltodict.parse(res.content)["PostResponse"]
+
+ assert_host_customisation(json_response["Location"], use_hostname_external=True)
+
+
+class TestSQS:
+ """
+ Test all combinations of:
+
+ * endpoint_strategy
+ * sqs_port_external
+ * hostname_external
+ """
+
+ def test_off_strategy_without_external_port(
+ self, monkeypatch, sqs_create_queue, assert_host_customisation
+ ):
+ monkeypatch.setattr(config, "SQS_ENDPOINT_STRATEGY", "off")
+
+ queue_name = f"queue-{short_uid()}"
+ queue_url = sqs_create_queue(QueueName=queue_name)
+
+ assert_host_customisation(queue_url, use_localhost=True)
+ assert queue_name in queue_url
+
+ def test_off_strategy_with_external_port(
+ self, monkeypatch, sqs_create_queue, assert_host_customisation
+ ):
+ external_port = 12345
+ monkeypatch.setattr(config, "SQS_ENDPOINT_STRATEGY", "off")
+ monkeypatch.setattr(config, "SQS_PORT_EXTERNAL", external_port)
+
+ queue_name = f"queue-{short_uid()}"
+ queue_url = sqs_create_queue(QueueName=queue_name)
+
+ assert_host_customisation(queue_url, use_hostname_external=True)
+ assert queue_name in queue_url
+ assert f":{external_port}" in queue_url
+
+ def test_domain_strategy(self, monkeypatch, sqs_create_queue, assert_host_customisation):
+ monkeypatch.setattr(config, "SQS_ENDPOINT_STRATEGY", "domain")
+
+ queue_name = f"queue-{short_uid()}"
+ queue_url = sqs_create_queue(QueueName=queue_name)
+
+ assert_host_customisation(queue_url, use_localstack_cloud=True)
+ assert queue_name in queue_url
+
+ def test_path_strategy(self, monkeypatch, sqs_create_queue, assert_host_customisation):
+ monkeypatch.setattr(config, "SQS_ENDPOINT_STRATEGY", "path")
+
+ queue_name = f"queue-{short_uid()}"
+ queue_url = sqs_create_queue(QueueName=queue_name)
+
+ assert_host_customisation(queue_url, use_localhost=True)
+ assert queue_name in queue_url
+
+
+class TestLambda:
+ @pytest.mark.skipif(condition=is_old_provider(), reason="Not implemented for legacy provider")
+ def test_function_url(self, assert_host_customisation, lambda_client, create_lambda_function):
+ function_name = f"function-{short_uid()}"
+ handler_code = ""
+ handler_file = new_tmp_file()
+ save_file(handler_file, handler_code)
+
+ create_lambda_function(
+ func_name=function_name,
+ handler_file=handler_file,
+ runtime=Runtime.python3_9,
+ )
+
+ function_url = lambda_client.create_function_url_config(
+ FunctionName=function_name,
+ AuthType="NONE",
+ )["FunctionUrl"]
+
+ assert_host_customisation(function_url, use_localstack_cloud=True)
+
+ @pytest.mark.skipif(condition=is_new_provider(), reason="Not implemented for new provider")
+ def test_http_api_for_function_url(
+ self, assert_host_customisation, create_lambda_function, aws_http_client_factory
+ ):
+ function_name = f"function-{short_uid()}"
+ handler_code = ""
+ handler_file = new_tmp_file()
+ save_file(handler_file, handler_code)
+
+ create_lambda_function(
+ func_name=function_name,
+ handler_file=handler_file,
+ runtime=Runtime.python3_9,
+ )
+
+ client = aws_http_client_factory("lambda", signer_factory=SigV4Auth)
+ url = f"/2021-10-31/functions/{function_name}/url"
+ r = client.post(
+ url,
+ data=json.dumps(
+ {
+ "AuthType": "NONE",
+ }
+ ),
+ params={"Qualifier": "$LATEST"},
+ )
+ r.raise_for_status()
+
+ function_url = r.json()["FunctionUrl"]
+
+ assert_host_customisation(function_url, use_localstack_cloud=True)