fix image digest in startup runtime information for new runtime

GitHub Actions / LocalStack Community integration with Pro failed Jun 27, 2024 in 0s

4 errors, 1 fail, 396 skipped, 2 750 pass in 1h 45m 35s

    2 files      2 suites   1h 45m 35s ⏱️
3 151 tests 2 750 ✅ 396 💤 1 ❌ 4 🔥
3 153 runs  2 750 ✅ 398 💤 1 ❌ 4 🔥

Results for commit ba3ca21.

Annotations

Check failure on line 0 in tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication

github-actions / LocalStack Community integration with Pro

test_setup (tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication) with error

pytest-junit-community-1.xml [took 5m 13s]
Raw output
failed on setup with "localstack.testing.pytest.fixtures.StackDeployError: Describe output:
{"StackId": "arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/fd31667e", "StackName": "BookstoreStack", "ChangeSetId": "arn:aws:cloudformation:us-east-1:000000000000:changeSet/test-cs-b5d30605/10cdc884", "CreationTime": "2024-06-27T07:37:35.454Z", "LastUpdatedTime": "2024-06-27T07:37:35.454Z", "RollbackConfiguration": {}, "StackStatus": "CREATE_FAILED", "StackStatusReason": "An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1", "DisableRollback": false, "NotificationARNs": [], "Capabilities": ["CAPABILITY_AUTO_EXPAND", "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], "Tags": [], "EnableTerminationProtection": false, "DriftInformation": {"StackDriftStatus": "NOT_CHECKED"}}
Failing resources:
- Domain66AC69E0 (AWS::OpenSearchService::Domain) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)
- BookstoreStack (AWS::CloudFormation::Stack) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)"
self = <localstack.testing.scenario.provisioning.InfraProvisioner object at 0x7fc16e930750>
skip_deployment = False

    def provision(self, skip_deployment: Optional[bool] = False):
        """
        Execute all previously added custom provisioning steps and deploy added CDK stacks via CloudFormation.
    
        Already deployed stacks will be updated instead.
        """
        self._synth()
        if skip_deployment:
            LOG.debug("Skipping deployment. Assuming stacks have already been created")
            return
    
        is_update = False
    
        if all(
            self._is_stack_deployed(stack_name, stack)
            for stack_name, stack in self.cloudformation_stacks.items()
        ):
            LOG.debug("All stacks are already deployed. Skipping the provisioning.")
            # TODO: in localstack we might want to do a delete/create
            #  but generally this won't be a common use case when developing against LocalStack
            is_update = True
    
        self._bootstrap()
        self._run_manual_setup_tasks()
        for stack_name, stack in self.cloudformation_stacks.items():
            change_set_name = f"test-cs-{short_uid()}"
            if len(stack["Template"]) > CFN_MAX_TEMPLATE_SIZE:
                # if the template size is too big, we need to upload it to s3 first
                # and use TemplateURL instead to point to the template in s3
                template_bucket_name = self._template_bucket_name()
                self._create_bucket_if_not_exists(template_bucket_name)
                key = f"{stack_name}.yaml"
                self.aws_client.s3.put_object(
                    Bucket=template_bucket_name, Key=key, Body=stack["Template"]
                )
                url = self.aws_client.s3.generate_presigned_url(
                    ClientMethod="get_object",
                    Params={"Bucket": template_bucket_name, "Key": key},
                    ExpiresIn=10,
                )
    
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateURL=url,
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            else:
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateBody=stack["Template"],
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            stack_id = self.cloudformation_stacks[stack_name]["StackId"] = change_set["StackId"]
            try:
                self.aws_client.cloudformation.get_waiter("change_set_create_complete").wait(
                    ChangeSetName=change_set["Id"],
                    WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                )
            except WaiterError:
                # it's OK if we don't have any updates to perform here (!)
                # there is no specific error code unfortunately
                if not (is_update):
                    raise
                else:
                    LOG.warning("Execution of change set %s failed. Assuming no changes detected.")
            else:
                self.aws_client.cloudformation.execute_change_set(ChangeSetName=change_set["Id"])
                try:
                    self.aws_client.cloudformation.get_waiter(
                        "stack_update_complete" if is_update else "stack_create_complete"
>                   ).wait(
                        StackName=stack_id,
                        WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                    )

../localstack/localstack-core/localstack/testing/scenario/provisioning.py:219: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
.venv/lib/python3.11/site-packages/botocore/waiter.py:55: in wait
    Waiter.wait(self, **kwargs)
../localstack/localstack-core/localstack/utils/patch.py:61: in proxy
    return new(target, *args, **kwargs)
../localstack/localstack-core/localstack/aws/connect.py:59: in my_patch
    return fn(self, **patched_kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <botocore.waiter.CloudFormation.Waiter.StackCreateComplete object at 0x7fc1524f6ed0>
kwargs = {'StackName': 'arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/fd31667e'}
acceptors = [<botocore.waiter.AcceptorConfig object at 0x7fc16eadaf90>, <botocore.waiter.AcceptorConfig object at 0x7fc16eadccd0>,...tocore.waiter.AcceptorConfig object at 0x7fc152d6c690>, <botocore.waiter.AcceptorConfig object at 0x7fc1513b8ed0>, ...]
current_state = 'failure', config = {'Delay': 1, 'MaxAttempts': 600}
sleep_amount = 1, max_attempts = 600
last_matched_acceptor = <botocore.waiter.AcceptorConfig object at 0x7fc1513b9350>
num_attempts = 2

    def wait(self, **kwargs):
        acceptors = list(self.config.acceptors)
        current_state = 'waiting'
        # pop the invocation specific config
        config = kwargs.pop('WaiterConfig', {})
        sleep_amount = config.get('Delay', self.config.delay)
        max_attempts = config.get('MaxAttempts', self.config.max_attempts)
        last_matched_acceptor = None
        num_attempts = 0
    
        while True:
            response = self._operation_method(**kwargs)
            num_attempts += 1
            for acceptor in acceptors:
                if acceptor.matcher_func(response):
                    last_matched_acceptor = acceptor
                    current_state = acceptor.state
                    break
            else:
                # If none of the acceptors matched, we should
                # transition to the failure state if an error
                # response was received.
                if is_valid_waiter_error(response):
                    # Transition to a failure state, which we
                    # can just handle here by raising an exception.
                    raise WaiterError(
                        name=self.name,
                        reason='An error occurred (%s): %s'
                        % (
                            response['Error'].get('Code', 'Unknown'),
                            response['Error'].get('Message', 'Unknown'),
                        ),
                        last_response=response,
                    )
            if current_state == 'success':
                logger.debug(
                    "Waiting complete, waiter matched the " "success state."
                )
                return
            if current_state == 'failure':
                reason = 'Waiter encountered a terminal failure state: %s' % (
                    acceptor.explanation
                )
>               raise WaiterError(
                    name=self.name,
                    reason=reason,
                    last_response=response,
                )
E               botocore.exceptions.WaiterError: Waiter StackCreateComplete failed: Waiter encountered a terminal failure state: For expression "Stacks[].StackStatus" we matched expected path: "CREATE_FAILED" at least once

.venv/lib/python3.11/site-packages/botocore/waiter.py:375: WaiterError

The above exception was the direct cause of the following exception:

self = <tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication object at 0x7fc18db8e090>
aws_client = <localstack.aws.connect.ServiceLevelClientFactory object at 0x7fc18473b2d0>
infrastructure_setup = <function infrastructure_setup.<locals>._infrastructure_setup at 0x7fc184348cc0>
patch_opensearch_strategy = <_pytest.monkeypatch.MonkeyPatch object at 0x7fc17cfbe050>

    @pytest.fixture(scope="class", autouse=True)
    def infrastructure(self, aws_client, infrastructure_setup, patch_opensearch_strategy):
        infra = infrastructure_setup("Bookstore")
    
        search_book_fn_path = os.path.join(os.path.dirname(__file__), "functions/search.py")
        search_update_fn_path = os.path.join(
            os.path.dirname(__file__), "functions/update_search_cluster.py"
        )
        # custom provisioning
        additional_packages = ["requests", "requests-aws4auth", "urllib3==1.26.6"]
        asset_bucket = infra.get_asset_bucket()
        infra.add_custom_setup_provisioning_step(
            lambda: load_python_lambda_to_s3(
                aws_client.s3,
                bucket_name=asset_bucket,
                key_name=SEARCH_KEY,
                code_path=search_book_fn_path,
                additional_python_packages=additional_packages,
            )
        )
        infra.add_custom_setup_provisioning_step(
            lambda: load_python_lambda_to_s3(
                aws_client.s3,
                bucket_name=asset_bucket,
                key_name=SEARCH_UPDATE_KEY,
                code_path=search_update_fn_path,
                additional_python_packages=additional_packages,
            )
        )
    
        # CDK-based provisioning
        stack = cdk.Stack(infra.cdk_app, "BookstoreStack")
        books_api = BooksApi(
            stack,
            "BooksApi",
            search_key=SEARCH_KEY,
            search_update_key=SEARCH_UPDATE_KEY,
        )
    
        cdk.CfnOutput(stack, "BooksTableName", value=books_api.books_table.table_name)
        cdk.CfnOutput(stack, "SearchDomain", value=books_api.opensearch_domain.domain_endpoint)
        cdk.CfnOutput(stack, "SearchDomainName", value=books_api.opensearch_domain.domain_name)
        cdk.CfnOutput(stack, "GetBooksFn", value=books_api.get_book_fn.function_name)
        cdk.CfnOutput(stack, "ListBooksFn", value=books_api.list_books_fn.function_name)
        cdk.CfnOutput(stack, "InitBooksTableFn", value=books_api.load_books_helper_fn.function_name)
        cdk.CfnOutput(stack, "SearchForBooksFn", value=books_api.search_book_fn.function_name)
    
        # set skip_teardown=True to prevent the stack to be deleted
>       with infra.provisioner(skip_teardown=False) as prov:

../localstack/tests/aws/scenario/bookstore/test_bookstore.py:110: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/contextlib.py:137: in __enter__
    return next(self.gen)
../localstack/localstack-core/localstack/testing/scenario/provisioning.py:128: in provisioner
    self.provision(skip_deployment=skip_deployment)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <localstack.testing.scenario.provisioning.InfraProvisioner object at 0x7fc16e930750>
skip_deployment = False

    def provision(self, skip_deployment: Optional[bool] = False):
        """
        Execute all previously added custom provisioning steps and deploy added CDK stacks via CloudFormation.
    
        Already deployed stacks will be updated instead.
        """
        self._synth()
        if skip_deployment:
            LOG.debug("Skipping deployment. Assuming stacks have already been created")
            return
    
        is_update = False
    
        if all(
            self._is_stack_deployed(stack_name, stack)
            for stack_name, stack in self.cloudformation_stacks.items()
        ):
            LOG.debug("All stacks are already deployed. Skipping the provisioning.")
            # TODO: in localstack we might want to do a delete/create
            #  but generally this won't be a common use case when developing against LocalStack
            is_update = True
    
        self._bootstrap()
        self._run_manual_setup_tasks()
        for stack_name, stack in self.cloudformation_stacks.items():
            change_set_name = f"test-cs-{short_uid()}"
            if len(stack["Template"]) > CFN_MAX_TEMPLATE_SIZE:
                # if the template size is too big, we need to upload it to s3 first
                # and use TemplateURL instead to point to the template in s3
                template_bucket_name = self._template_bucket_name()
                self._create_bucket_if_not_exists(template_bucket_name)
                key = f"{stack_name}.yaml"
                self.aws_client.s3.put_object(
                    Bucket=template_bucket_name, Key=key, Body=stack["Template"]
                )
                url = self.aws_client.s3.generate_presigned_url(
                    ClientMethod="get_object",
                    Params={"Bucket": template_bucket_name, "Key": key},
                    ExpiresIn=10,
                )
    
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateURL=url,
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            else:
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateBody=stack["Template"],
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            stack_id = self.cloudformation_stacks[stack_name]["StackId"] = change_set["StackId"]
            try:
                self.aws_client.cloudformation.get_waiter("change_set_create_complete").wait(
                    ChangeSetName=change_set["Id"],
                    WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                )
            except WaiterError:
                # it's OK if we don't have any updates to perform here (!)
                # there is no specific error code unfortunately
                if not (is_update):
                    raise
                else:
                    LOG.warning("Execution of change set %s failed. Assuming no changes detected.")
            else:
                self.aws_client.cloudformation.execute_change_set(ChangeSetName=change_set["Id"])
                try:
                    self.aws_client.cloudformation.get_waiter(
                        "stack_update_complete" if is_update else "stack_create_complete"
                    ).wait(
                        StackName=stack_id,
                        WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                    )
    
                except WaiterError as e:
>                   raise StackDeployError(
                        self.aws_client.cloudformation.describe_stacks(StackName=stack_id)[
                            "Stacks"
                        ][0],
                        self.aws_client.cloudformation.describe_stack_events(StackName=stack_id)[
                            "StackEvents"
                        ],
                    ) from e
E                   localstack.testing.pytest.fixtures.StackDeployError: Describe output:
E                   {"StackId": "arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/fd31667e", "StackName": "BookstoreStack", "ChangeSetId": "arn:aws:cloudformation:us-east-1:000000000000:changeSet/test-cs-b5d30605/10cdc884", "CreationTime": "2024-06-27T07:37:35.454Z", "LastUpdatedTime": "2024-06-27T07:37:35.454Z", "RollbackConfiguration": {}, "StackStatus": "CREATE_FAILED", "StackStatusReason": "An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1", "DisableRollback": false, "NotificationARNs": [], "Capabilities": ["CAPABILITY_AUTO_EXPAND", "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], "Tags": [], "EnableTerminationProtection": false, "DriftInformation": {"StackDriftStatus": "NOT_CHECKED"}}
E                   Failing resources:
E                   - Domain66AC69E0 (AWS::OpenSearchService::Domain) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)
E                   - BookstoreStack (AWS::CloudFormation::Stack) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)

../localstack/localstack-core/localstack/testing/scenario/provisioning.py:225: StackDeployError

Check failure on line 0 in tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication

github-actions / LocalStack Community integration with Pro

test_lambda_dynamodb (tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication) with error

pytest-junit-community-1.xml [took 8s]
Raw output
failed on setup with "localstack.testing.pytest.fixtures.StackDeployError: Describe output:
{"StackId": "arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/38a335b5", "StackName": "BookstoreStack", "ChangeSetId": "arn:aws:cloudformation:us-east-1:000000000000:changeSet/test-cs-f0394150/6d75970a", "CreationTime": "2024-06-27T07:37:44.406Z", "LastUpdatedTime": "2024-06-27T07:37:44.406Z", "RollbackConfiguration": {}, "StackStatus": "CREATE_FAILED", "StackStatusReason": "An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1", "DisableRollback": false, "NotificationARNs": [], "Capabilities": ["CAPABILITY_AUTO_EXPAND", "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], "Tags": [], "EnableTerminationProtection": false, "DriftInformation": {"StackDriftStatus": "NOT_CHECKED"}}
Failing resources:
- Domain66AC69E0 (AWS::OpenSearchService::Domain) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)
- BookstoreStack (AWS::CloudFormation::Stack) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)"
self = <localstack.testing.scenario.provisioning.InfraProvisioner object at 0x7fc16e711510>
skip_deployment = False

    def provision(self, skip_deployment: Optional[bool] = False):
        """
        Execute all previously added custom provisioning steps and deploy added CDK stacks via CloudFormation.
    
        Already deployed stacks will be updated instead.
        """
        self._synth()
        if skip_deployment:
            LOG.debug("Skipping deployment. Assuming stacks have already been created")
            return
    
        is_update = False
    
        if all(
            self._is_stack_deployed(stack_name, stack)
            for stack_name, stack in self.cloudformation_stacks.items()
        ):
            LOG.debug("All stacks are already deployed. Skipping the provisioning.")
            # TODO: in localstack we might want to do a delete/create
            #  but generally this won't be a common use case when developing against LocalStack
            is_update = True
    
        self._bootstrap()
        self._run_manual_setup_tasks()
        for stack_name, stack in self.cloudformation_stacks.items():
            change_set_name = f"test-cs-{short_uid()}"
            if len(stack["Template"]) > CFN_MAX_TEMPLATE_SIZE:
                # if the template size is too big, we need to upload it to s3 first
                # and use TemplateURL instead to point to the template in s3
                template_bucket_name = self._template_bucket_name()
                self._create_bucket_if_not_exists(template_bucket_name)
                key = f"{stack_name}.yaml"
                self.aws_client.s3.put_object(
                    Bucket=template_bucket_name, Key=key, Body=stack["Template"]
                )
                url = self.aws_client.s3.generate_presigned_url(
                    ClientMethod="get_object",
                    Params={"Bucket": template_bucket_name, "Key": key},
                    ExpiresIn=10,
                )
    
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateURL=url,
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            else:
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateBody=stack["Template"],
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            stack_id = self.cloudformation_stacks[stack_name]["StackId"] = change_set["StackId"]
            try:
                self.aws_client.cloudformation.get_waiter("change_set_create_complete").wait(
                    ChangeSetName=change_set["Id"],
                    WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                )
            except WaiterError:
                # it's OK if we don't have any updates to perform here (!)
                # there is no specific error code unfortunately
                if not (is_update):
                    raise
                else:
                    LOG.warning("Execution of change set %s failed. Assuming no changes detected.")
            else:
                self.aws_client.cloudformation.execute_change_set(ChangeSetName=change_set["Id"])
                try:
                    self.aws_client.cloudformation.get_waiter(
                        "stack_update_complete" if is_update else "stack_create_complete"
>                   ).wait(
                        StackName=stack_id,
                        WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                    )

../localstack/localstack-core/localstack/testing/scenario/provisioning.py:219: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
.venv/lib/python3.11/site-packages/botocore/waiter.py:55: in wait
    Waiter.wait(self, **kwargs)
../localstack/localstack-core/localstack/utils/patch.py:61: in proxy
    return new(target, *args, **kwargs)
../localstack/localstack-core/localstack/aws/connect.py:59: in my_patch
    return fn(self, **patched_kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <botocore.waiter.CloudFormation.Waiter.StackCreateComplete object at 0x7fc16e97f010>
kwargs = {'StackName': 'arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/38a335b5'}
acceptors = [<botocore.waiter.AcceptorConfig object at 0x7fc16e97ec50>, <botocore.waiter.AcceptorConfig object at 0x7fc16e97eb90>,...tocore.waiter.AcceptorConfig object at 0x7fc16e9f6550>, <botocore.waiter.AcceptorConfig object at 0x7fc16e9f7cd0>, ...]
current_state = 'failure', config = {'Delay': 1, 'MaxAttempts': 600}
sleep_amount = 1, max_attempts = 600
last_matched_acceptor = <botocore.waiter.AcceptorConfig object at 0x7fc16e9f5050>
num_attempts = 2

    def wait(self, **kwargs):
        acceptors = list(self.config.acceptors)
        current_state = 'waiting'
        # pop the invocation specific config
        config = kwargs.pop('WaiterConfig', {})
        sleep_amount = config.get('Delay', self.config.delay)
        max_attempts = config.get('MaxAttempts', self.config.max_attempts)
        last_matched_acceptor = None
        num_attempts = 0
    
        while True:
            response = self._operation_method(**kwargs)
            num_attempts += 1
            for acceptor in acceptors:
                if acceptor.matcher_func(response):
                    last_matched_acceptor = acceptor
                    current_state = acceptor.state
                    break
            else:
                # If none of the acceptors matched, we should
                # transition to the failure state if an error
                # response was received.
                if is_valid_waiter_error(response):
                    # Transition to a failure state, which we
                    # can just handle here by raising an exception.
                    raise WaiterError(
                        name=self.name,
                        reason='An error occurred (%s): %s'
                        % (
                            response['Error'].get('Code', 'Unknown'),
                            response['Error'].get('Message', 'Unknown'),
                        ),
                        last_response=response,
                    )
            if current_state == 'success':
                logger.debug(
                    "Waiting complete, waiter matched the " "success state."
                )
                return
            if current_state == 'failure':
                reason = 'Waiter encountered a terminal failure state: %s' % (
                    acceptor.explanation
                )
>               raise WaiterError(
                    name=self.name,
                    reason=reason,
                    last_response=response,
                )
E               botocore.exceptions.WaiterError: Waiter StackCreateComplete failed: Waiter encountered a terminal failure state: For expression "Stacks[].StackStatus" we matched expected path: "CREATE_FAILED" at least once

.venv/lib/python3.11/site-packages/botocore/waiter.py:375: WaiterError

The above exception was the direct cause of the following exception:

self = <tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication object at 0x7fc18db8e090>
aws_client = <localstack.aws.connect.ServiceLevelClientFactory object at 0x7fc18473b2d0>
infrastructure_setup = <function infrastructure_setup.<locals>._infrastructure_setup at 0x7fc184348cc0>
patch_opensearch_strategy = <_pytest.monkeypatch.MonkeyPatch object at 0x7fc17cfbe050>

    @pytest.fixture(scope="class", autouse=True)
    def infrastructure(self, aws_client, infrastructure_setup, patch_opensearch_strategy):
        infra = infrastructure_setup("Bookstore")
    
        search_book_fn_path = os.path.join(os.path.dirname(__file__), "functions/search.py")
        search_update_fn_path = os.path.join(
            os.path.dirname(__file__), "functions/update_search_cluster.py"
        )
        # custom provisioning
        additional_packages = ["requests", "requests-aws4auth", "urllib3==1.26.6"]
        asset_bucket = infra.get_asset_bucket()
        infra.add_custom_setup_provisioning_step(
            lambda: load_python_lambda_to_s3(
                aws_client.s3,
                bucket_name=asset_bucket,
                key_name=SEARCH_KEY,
                code_path=search_book_fn_path,
                additional_python_packages=additional_packages,
            )
        )
        infra.add_custom_setup_provisioning_step(
            lambda: load_python_lambda_to_s3(
                aws_client.s3,
                bucket_name=asset_bucket,
                key_name=SEARCH_UPDATE_KEY,
                code_path=search_update_fn_path,
                additional_python_packages=additional_packages,
            )
        )
    
        # CDK-based provisioning
        stack = cdk.Stack(infra.cdk_app, "BookstoreStack")
        books_api = BooksApi(
            stack,
            "BooksApi",
            search_key=SEARCH_KEY,
            search_update_key=SEARCH_UPDATE_KEY,
        )
    
        cdk.CfnOutput(stack, "BooksTableName", value=books_api.books_table.table_name)
        cdk.CfnOutput(stack, "SearchDomain", value=books_api.opensearch_domain.domain_endpoint)
        cdk.CfnOutput(stack, "SearchDomainName", value=books_api.opensearch_domain.domain_name)
        cdk.CfnOutput(stack, "GetBooksFn", value=books_api.get_book_fn.function_name)
        cdk.CfnOutput(stack, "ListBooksFn", value=books_api.list_books_fn.function_name)
        cdk.CfnOutput(stack, "InitBooksTableFn", value=books_api.load_books_helper_fn.function_name)
        cdk.CfnOutput(stack, "SearchForBooksFn", value=books_api.search_book_fn.function_name)
    
        # set skip_teardown=True to prevent the stack to be deleted
>       with infra.provisioner(skip_teardown=False) as prov:

../localstack/tests/aws/scenario/bookstore/test_bookstore.py:110: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/contextlib.py:137: in __enter__
    return next(self.gen)
../localstack/localstack-core/localstack/testing/scenario/provisioning.py:128: in provisioner
    self.provision(skip_deployment=skip_deployment)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <localstack.testing.scenario.provisioning.InfraProvisioner object at 0x7fc16e711510>
skip_deployment = False

    def provision(self, skip_deployment: Optional[bool] = False):
        """
        Execute all previously added custom provisioning steps and deploy added CDK stacks via CloudFormation.
    
        Already deployed stacks will be updated instead.
        """
        self._synth()
        if skip_deployment:
            LOG.debug("Skipping deployment. Assuming stacks have already been created")
            return
    
        is_update = False
    
        if all(
            self._is_stack_deployed(stack_name, stack)
            for stack_name, stack in self.cloudformation_stacks.items()
        ):
            LOG.debug("All stacks are already deployed. Skipping the provisioning.")
            # TODO: in localstack we might want to do a delete/create
            #  but generally this won't be a common use case when developing against LocalStack
            is_update = True
    
        self._bootstrap()
        self._run_manual_setup_tasks()
        for stack_name, stack in self.cloudformation_stacks.items():
            change_set_name = f"test-cs-{short_uid()}"
            if len(stack["Template"]) > CFN_MAX_TEMPLATE_SIZE:
                # if the template size is too big, we need to upload it to s3 first
                # and use TemplateURL instead to point to the template in s3
                template_bucket_name = self._template_bucket_name()
                self._create_bucket_if_not_exists(template_bucket_name)
                key = f"{stack_name}.yaml"
                self.aws_client.s3.put_object(
                    Bucket=template_bucket_name, Key=key, Body=stack["Template"]
                )
                url = self.aws_client.s3.generate_presigned_url(
                    ClientMethod="get_object",
                    Params={"Bucket": template_bucket_name, "Key": key},
                    ExpiresIn=10,
                )
    
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateURL=url,
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            else:
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateBody=stack["Template"],
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            stack_id = self.cloudformation_stacks[stack_name]["StackId"] = change_set["StackId"]
            try:
                self.aws_client.cloudformation.get_waiter("change_set_create_complete").wait(
                    ChangeSetName=change_set["Id"],
                    WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                )
            except WaiterError:
                # it's OK if we don't have any updates to perform here (!)
                # there is no specific error code unfortunately
                if not (is_update):
                    raise
                else:
                    LOG.warning("Execution of change set %s failed. Assuming no changes detected.")
            else:
                self.aws_client.cloudformation.execute_change_set(ChangeSetName=change_set["Id"])
                try:
                    self.aws_client.cloudformation.get_waiter(
                        "stack_update_complete" if is_update else "stack_create_complete"
                    ).wait(
                        StackName=stack_id,
                        WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                    )
    
                except WaiterError as e:
>                   raise StackDeployError(
                        self.aws_client.cloudformation.describe_stacks(StackName=stack_id)[
                            "Stacks"
                        ][0],
                        self.aws_client.cloudformation.describe_stack_events(StackName=stack_id)[
                            "StackEvents"
                        ],
                    ) from e
E                   localstack.testing.pytest.fixtures.StackDeployError: Describe output:
E                   {"StackId": "arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/38a335b5", "StackName": "BookstoreStack", "ChangeSetId": "arn:aws:cloudformation:us-east-1:000000000000:changeSet/test-cs-f0394150/6d75970a", "CreationTime": "2024-06-27T07:37:44.406Z", "LastUpdatedTime": "2024-06-27T07:37:44.406Z", "RollbackConfiguration": {}, "StackStatus": "CREATE_FAILED", "StackStatusReason": "An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1", "DisableRollback": false, "NotificationARNs": [], "Capabilities": ["CAPABILITY_AUTO_EXPAND", "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], "Tags": [], "EnableTerminationProtection": false, "DriftInformation": {"StackDriftStatus": "NOT_CHECKED"}}
E                   Failing resources:
E                   - Domain66AC69E0 (AWS::OpenSearchService::Domain) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)
E                   - BookstoreStack (AWS::CloudFormation::Stack) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)

../localstack/localstack-core/localstack/testing/scenario/provisioning.py:225: StackDeployError

Check failure on line 0 in tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication

github-actions / LocalStack Community integration with Pro

test_search_books (tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication) with error

pytest-junit-community-1.xml [took 8s]
Raw output
failed on setup with "localstack.testing.pytest.fixtures.StackDeployError: Describe output:
{"StackId": "arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/5cf75d33", "StackName": "BookstoreStack", "ChangeSetId": "arn:aws:cloudformation:us-east-1:000000000000:changeSet/test-cs-eb467492/e4cc0fcc", "CreationTime": "2024-06-27T07:37:53.277Z", "LastUpdatedTime": "2024-06-27T07:37:53.277Z", "RollbackConfiguration": {}, "StackStatus": "CREATE_FAILED", "StackStatusReason": "An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1", "DisableRollback": false, "NotificationARNs": [], "Capabilities": ["CAPABILITY_AUTO_EXPAND", "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], "Tags": [], "EnableTerminationProtection": false, "DriftInformation": {"StackDriftStatus": "NOT_CHECKED"}}
Failing resources:
- Domain66AC69E0 (AWS::OpenSearchService::Domain) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)
- BookstoreStack (AWS::CloudFormation::Stack) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)"
self = <localstack.testing.scenario.provisioning.InfraProvisioner object at 0x7fc16e78d090>
skip_deployment = False

    def provision(self, skip_deployment: Optional[bool] = False):
        """
        Execute all previously added custom provisioning steps and deploy added CDK stacks via CloudFormation.
    
        Already deployed stacks will be updated instead.
        """
        self._synth()
        if skip_deployment:
            LOG.debug("Skipping deployment. Assuming stacks have already been created")
            return
    
        is_update = False
    
        if all(
            self._is_stack_deployed(stack_name, stack)
            for stack_name, stack in self.cloudformation_stacks.items()
        ):
            LOG.debug("All stacks are already deployed. Skipping the provisioning.")
            # TODO: in localstack we might want to do a delete/create
            #  but generally this won't be a common use case when developing against LocalStack
            is_update = True
    
        self._bootstrap()
        self._run_manual_setup_tasks()
        for stack_name, stack in self.cloudformation_stacks.items():
            change_set_name = f"test-cs-{short_uid()}"
            if len(stack["Template"]) > CFN_MAX_TEMPLATE_SIZE:
                # if the template size is too big, we need to upload it to s3 first
                # and use TemplateURL instead to point to the template in s3
                template_bucket_name = self._template_bucket_name()
                self._create_bucket_if_not_exists(template_bucket_name)
                key = f"{stack_name}.yaml"
                self.aws_client.s3.put_object(
                    Bucket=template_bucket_name, Key=key, Body=stack["Template"]
                )
                url = self.aws_client.s3.generate_presigned_url(
                    ClientMethod="get_object",
                    Params={"Bucket": template_bucket_name, "Key": key},
                    ExpiresIn=10,
                )
    
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateURL=url,
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            else:
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateBody=stack["Template"],
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            stack_id = self.cloudformation_stacks[stack_name]["StackId"] = change_set["StackId"]
            try:
                self.aws_client.cloudformation.get_waiter("change_set_create_complete").wait(
                    ChangeSetName=change_set["Id"],
                    WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                )
            except WaiterError:
                # it's OK if we don't have any updates to perform here (!)
                # there is no specific error code unfortunately
                if not (is_update):
                    raise
                else:
                    LOG.warning("Execution of change set %s failed. Assuming no changes detected.")
            else:
                self.aws_client.cloudformation.execute_change_set(ChangeSetName=change_set["Id"])
                try:
                    self.aws_client.cloudformation.get_waiter(
                        "stack_update_complete" if is_update else "stack_create_complete"
>                   ).wait(
                        StackName=stack_id,
                        WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                    )

../localstack/localstack-core/localstack/testing/scenario/provisioning.py:219: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
.venv/lib/python3.11/site-packages/botocore/waiter.py:55: in wait
    Waiter.wait(self, **kwargs)
../localstack/localstack-core/localstack/utils/patch.py:61: in proxy
    return new(target, *args, **kwargs)
../localstack/localstack-core/localstack/aws/connect.py:59: in my_patch
    return fn(self, **patched_kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <botocore.waiter.CloudFormation.Waiter.StackCreateComplete object at 0x7fc16e776e10>
kwargs = {'StackName': 'arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/5cf75d33'}
acceptors = [<botocore.waiter.AcceptorConfig object at 0x7fc16e7761d0>, <botocore.waiter.AcceptorConfig object at 0x7fc16e777590>,...tocore.waiter.AcceptorConfig object at 0x7fc16e73df10>, <botocore.waiter.AcceptorConfig object at 0x7fc16e73e8d0>, ...]
current_state = 'failure', config = {'Delay': 1, 'MaxAttempts': 600}
sleep_amount = 1, max_attempts = 600
last_matched_acceptor = <botocore.waiter.AcceptorConfig object at 0x7fc16e73e990>
num_attempts = 2

    def wait(self, **kwargs):
        acceptors = list(self.config.acceptors)
        current_state = 'waiting'
        # pop the invocation specific config
        config = kwargs.pop('WaiterConfig', {})
        sleep_amount = config.get('Delay', self.config.delay)
        max_attempts = config.get('MaxAttempts', self.config.max_attempts)
        last_matched_acceptor = None
        num_attempts = 0
    
        while True:
            response = self._operation_method(**kwargs)
            num_attempts += 1
            for acceptor in acceptors:
                if acceptor.matcher_func(response):
                    last_matched_acceptor = acceptor
                    current_state = acceptor.state
                    break
            else:
                # If none of the acceptors matched, we should
                # transition to the failure state if an error
                # response was received.
                if is_valid_waiter_error(response):
                    # Transition to a failure state, which we
                    # can just handle here by raising an exception.
                    raise WaiterError(
                        name=self.name,
                        reason='An error occurred (%s): %s'
                        % (
                            response['Error'].get('Code', 'Unknown'),
                            response['Error'].get('Message', 'Unknown'),
                        ),
                        last_response=response,
                    )
            if current_state == 'success':
                logger.debug(
                    "Waiting complete, waiter matched the " "success state."
                )
                return
            if current_state == 'failure':
                reason = 'Waiter encountered a terminal failure state: %s' % (
                    acceptor.explanation
                )
>               raise WaiterError(
                    name=self.name,
                    reason=reason,
                    last_response=response,
                )
E               botocore.exceptions.WaiterError: Waiter StackCreateComplete failed: Waiter encountered a terminal failure state: For expression "Stacks[].StackStatus" we matched expected path: "CREATE_FAILED" at least once

.venv/lib/python3.11/site-packages/botocore/waiter.py:375: WaiterError

The above exception was the direct cause of the following exception:

self = <tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication object at 0x7fc18db8e090>
aws_client = <localstack.aws.connect.ServiceLevelClientFactory object at 0x7fc18473b2d0>
infrastructure_setup = <function infrastructure_setup.<locals>._infrastructure_setup at 0x7fc184348cc0>
patch_opensearch_strategy = <_pytest.monkeypatch.MonkeyPatch object at 0x7fc17cfbe050>

    @pytest.fixture(scope="class", autouse=True)
    def infrastructure(self, aws_client, infrastructure_setup, patch_opensearch_strategy):
        infra = infrastructure_setup("Bookstore")
    
        search_book_fn_path = os.path.join(os.path.dirname(__file__), "functions/search.py")
        search_update_fn_path = os.path.join(
            os.path.dirname(__file__), "functions/update_search_cluster.py"
        )
        # custom provisioning
        additional_packages = ["requests", "requests-aws4auth", "urllib3==1.26.6"]
        asset_bucket = infra.get_asset_bucket()
        infra.add_custom_setup_provisioning_step(
            lambda: load_python_lambda_to_s3(
                aws_client.s3,
                bucket_name=asset_bucket,
                key_name=SEARCH_KEY,
                code_path=search_book_fn_path,
                additional_python_packages=additional_packages,
            )
        )
        infra.add_custom_setup_provisioning_step(
            lambda: load_python_lambda_to_s3(
                aws_client.s3,
                bucket_name=asset_bucket,
                key_name=SEARCH_UPDATE_KEY,
                code_path=search_update_fn_path,
                additional_python_packages=additional_packages,
            )
        )
    
        # CDK-based provisioning
        stack = cdk.Stack(infra.cdk_app, "BookstoreStack")
        books_api = BooksApi(
            stack,
            "BooksApi",
            search_key=SEARCH_KEY,
            search_update_key=SEARCH_UPDATE_KEY,
        )
    
        cdk.CfnOutput(stack, "BooksTableName", value=books_api.books_table.table_name)
        cdk.CfnOutput(stack, "SearchDomain", value=books_api.opensearch_domain.domain_endpoint)
        cdk.CfnOutput(stack, "SearchDomainName", value=books_api.opensearch_domain.domain_name)
        cdk.CfnOutput(stack, "GetBooksFn", value=books_api.get_book_fn.function_name)
        cdk.CfnOutput(stack, "ListBooksFn", value=books_api.list_books_fn.function_name)
        cdk.CfnOutput(stack, "InitBooksTableFn", value=books_api.load_books_helper_fn.function_name)
        cdk.CfnOutput(stack, "SearchForBooksFn", value=books_api.search_book_fn.function_name)
    
        # set skip_teardown=True to prevent the stack to be deleted
>       with infra.provisioner(skip_teardown=False) as prov:

../localstack/tests/aws/scenario/bookstore/test_bookstore.py:110: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/contextlib.py:137: in __enter__
    return next(self.gen)
../localstack/localstack-core/localstack/testing/scenario/provisioning.py:128: in provisioner
    self.provision(skip_deployment=skip_deployment)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <localstack.testing.scenario.provisioning.InfraProvisioner object at 0x7fc16e78d090>
skip_deployment = False

    def provision(self, skip_deployment: Optional[bool] = False):
        """
        Execute all previously added custom provisioning steps and deploy added CDK stacks via CloudFormation.
    
        Already deployed stacks will be updated instead.
        """
        self._synth()
        if skip_deployment:
            LOG.debug("Skipping deployment. Assuming stacks have already been created")
            return
    
        is_update = False
    
        if all(
            self._is_stack_deployed(stack_name, stack)
            for stack_name, stack in self.cloudformation_stacks.items()
        ):
            LOG.debug("All stacks are already deployed. Skipping the provisioning.")
            # TODO: in localstack we might want to do a delete/create
            #  but generally this won't be a common use case when developing against LocalStack
            is_update = True
    
        self._bootstrap()
        self._run_manual_setup_tasks()
        for stack_name, stack in self.cloudformation_stacks.items():
            change_set_name = f"test-cs-{short_uid()}"
            if len(stack["Template"]) > CFN_MAX_TEMPLATE_SIZE:
                # if the template size is too big, we need to upload it to s3 first
                # and use TemplateURL instead to point to the template in s3
                template_bucket_name = self._template_bucket_name()
                self._create_bucket_if_not_exists(template_bucket_name)
                key = f"{stack_name}.yaml"
                self.aws_client.s3.put_object(
                    Bucket=template_bucket_name, Key=key, Body=stack["Template"]
                )
                url = self.aws_client.s3.generate_presigned_url(
                    ClientMethod="get_object",
                    Params={"Bucket": template_bucket_name, "Key": key},
                    ExpiresIn=10,
                )
    
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateURL=url,
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            else:
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateBody=stack["Template"],
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            stack_id = self.cloudformation_stacks[stack_name]["StackId"] = change_set["StackId"]
            try:
                self.aws_client.cloudformation.get_waiter("change_set_create_complete").wait(
                    ChangeSetName=change_set["Id"],
                    WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                )
            except WaiterError:
                # it's OK if we don't have any updates to perform here (!)
                # there is no specific error code unfortunately
                if not (is_update):
                    raise
                else:
                    LOG.warning("Execution of change set %s failed. Assuming no changes detected.")
            else:
                self.aws_client.cloudformation.execute_change_set(ChangeSetName=change_set["Id"])
                try:
                    self.aws_client.cloudformation.get_waiter(
                        "stack_update_complete" if is_update else "stack_create_complete"
                    ).wait(
                        StackName=stack_id,
                        WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                    )
    
                except WaiterError as e:
>                   raise StackDeployError(
                        self.aws_client.cloudformation.describe_stacks(StackName=stack_id)[
                            "Stacks"
                        ][0],
                        self.aws_client.cloudformation.describe_stack_events(StackName=stack_id)[
                            "StackEvents"
                        ],
                    ) from e
E                   localstack.testing.pytest.fixtures.StackDeployError: Describe output:
E                   {"StackId": "arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/5cf75d33", "StackName": "BookstoreStack", "ChangeSetId": "arn:aws:cloudformation:us-east-1:000000000000:changeSet/test-cs-eb467492/e4cc0fcc", "CreationTime": "2024-06-27T07:37:53.277Z", "LastUpdatedTime": "2024-06-27T07:37:53.277Z", "RollbackConfiguration": {}, "StackStatus": "CREATE_FAILED", "StackStatusReason": "An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1", "DisableRollback": false, "NotificationARNs": [], "Capabilities": ["CAPABILITY_AUTO_EXPAND", "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], "Tags": [], "EnableTerminationProtection": false, "DriftInformation": {"StackDriftStatus": "NOT_CHECKED"}}
E                   Failing resources:
E                   - Domain66AC69E0 (AWS::OpenSearchService::Domain) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)
E                   - BookstoreStack (AWS::CloudFormation::Stack) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)

../localstack/localstack-core/localstack/testing/scenario/provisioning.py:225: StackDeployError
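
Both bookstore failures trace back to the same root cause: an OpenSearch domain named bookstorestack-domain66ac69e was left behind by an earlier run, so the stack's CreateDomain call collides with it. The following is a minimal cleanup sketch (not part of the test suite) showing how such a leftover domain could be located and deleted with boto3; the endpoint URL and the dummy credentials are assumptions for a local LocalStack instance.

import boto3

# assumed LocalStack edge endpoint and dummy credentials; adjust as needed
opensearch = boto3.client(
    "opensearch",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

# find and delete domains left over from previous BookstoreStack runs
for domain in opensearch.list_domain_names()["DomainNames"]:
    name = domain["DomainName"]
    if name.startswith("bookstorestack-"):
        print(f"deleting leftover OpenSearch domain: {name}")
        opensearch.delete_domain(DomainName=name)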

Check failure on line 0 in tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication


github-actions / LocalStack Community integration with Pro

test_opensearch_crud (tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication) with error

pytest-junit-community-1.xml [took 8s]
Raw output
failed on setup with "localstack.testing.pytest.fixtures.StackDeployError: Describe output:
{"StackId": "arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/3a123e56", "StackName": "BookstoreStack", "ChangeSetId": "arn:aws:cloudformation:us-east-1:000000000000:changeSet/test-cs-a557bc82/c583d8db", "CreationTime": "2024-06-27T07:38:02.701Z", "LastUpdatedTime": "2024-06-27T07:38:02.701Z", "RollbackConfiguration": {}, "StackStatus": "CREATE_FAILED", "StackStatusReason": "An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1", "DisableRollback": false, "NotificationARNs": [], "Capabilities": ["CAPABILITY_AUTO_EXPAND", "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], "Tags": [], "EnableTerminationProtection": false, "DriftInformation": {"StackDriftStatus": "NOT_CHECKED"}}
Failing resources:
- Domain66AC69E0 (AWS::OpenSearchService::Domain) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)
- BookstoreStack (AWS::CloudFormation::Stack) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)"
self = <localstack.testing.scenario.provisioning.InfraProvisioner object at 0x7fc16ef86410>
skip_deployment = False

    def provision(self, skip_deployment: Optional[bool] = False):
        """
        Execute all previously added custom provisioning steps and deploy added CDK stacks via CloudFormation.
    
        Already deployed stacks will be updated instead.
        """
        self._synth()
        if skip_deployment:
            LOG.debug("Skipping deployment. Assuming stacks have already been created")
            return
    
        is_update = False
    
        if all(
            self._is_stack_deployed(stack_name, stack)
            for stack_name, stack in self.cloudformation_stacks.items()
        ):
            LOG.debug("All stacks are already deployed. Skipping the provisioning.")
            # TODO: in localstack we might want to do a delete/create
            #  but generally this won't be a common use case when developing against LocalStack
            is_update = True
    
        self._bootstrap()
        self._run_manual_setup_tasks()
        for stack_name, stack in self.cloudformation_stacks.items():
            change_set_name = f"test-cs-{short_uid()}"
            if len(stack["Template"]) > CFN_MAX_TEMPLATE_SIZE:
                # if the template size is too big, we need to upload it to s3 first
                # and use TemplateURL instead to point to the template in s3
                template_bucket_name = self._template_bucket_name()
                self._create_bucket_if_not_exists(template_bucket_name)
                key = f"{stack_name}.yaml"
                self.aws_client.s3.put_object(
                    Bucket=template_bucket_name, Key=key, Body=stack["Template"]
                )
                url = self.aws_client.s3.generate_presigned_url(
                    ClientMethod="get_object",
                    Params={"Bucket": template_bucket_name, "Key": key},
                    ExpiresIn=10,
                )
    
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateURL=url,
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            else:
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateBody=stack["Template"],
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            stack_id = self.cloudformation_stacks[stack_name]["StackId"] = change_set["StackId"]
            try:
                self.aws_client.cloudformation.get_waiter("change_set_create_complete").wait(
                    ChangeSetName=change_set["Id"],
                    WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                )
            except WaiterError:
                # it's OK if we don't have any updates to perform here (!)
                # there is no specific error code unfortunately
                if not is_update:
                    raise
                else:
                    LOG.warning(
                        "Execution of change set %s failed. Assuming no changes detected.",
                        change_set_name,
                    )
            else:
                self.aws_client.cloudformation.execute_change_set(ChangeSetName=change_set["Id"])
                try:
                    self.aws_client.cloudformation.get_waiter(
                        "stack_update_complete" if is_update else "stack_create_complete"
>                   ).wait(
                        StackName=stack_id,
                        WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                    )

../localstack/localstack-core/localstack/testing/scenario/provisioning.py:219: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
.venv/lib/python3.11/site-packages/botocore/waiter.py:55: in wait
    Waiter.wait(self, **kwargs)
../localstack/localstack-core/localstack/utils/patch.py:61: in proxy
    return new(target, *args, **kwargs)
../localstack/localstack-core/localstack/aws/connect.py:59: in my_patch
    return fn(self, **patched_kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <botocore.waiter.CloudFormation.Waiter.StackCreateComplete object at 0x7fc152f65890>
kwargs = {'StackName': 'arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/3a123e56'}
acceptors = [<botocore.waiter.AcceptorConfig object at 0x7fc1524f5110>, <botocore.waiter.AcceptorConfig object at 0x7fc152f66490>,...tocore.waiter.AcceptorConfig object at 0x7fc152f66650>, <botocore.waiter.AcceptorConfig object at 0x7fc152fcc090>, ...]
current_state = 'failure', config = {'Delay': 1, 'MaxAttempts': 600}
sleep_amount = 1, max_attempts = 600
last_matched_acceptor = <botocore.waiter.AcceptorConfig object at 0x7fc152f676d0>
num_attempts = 2

    def wait(self, **kwargs):
        acceptors = list(self.config.acceptors)
        current_state = 'waiting'
        # pop the invocation specific config
        config = kwargs.pop('WaiterConfig', {})
        sleep_amount = config.get('Delay', self.config.delay)
        max_attempts = config.get('MaxAttempts', self.config.max_attempts)
        last_matched_acceptor = None
        num_attempts = 0
    
        while True:
            response = self._operation_method(**kwargs)
            num_attempts += 1
            for acceptor in acceptors:
                if acceptor.matcher_func(response):
                    last_matched_acceptor = acceptor
                    current_state = acceptor.state
                    break
            else:
                # If none of the acceptors matched, we should
                # transition to the failure state if an error
                # response was received.
                if is_valid_waiter_error(response):
                    # Transition to a failure state, which we
                    # can just handle here by raising an exception.
                    raise WaiterError(
                        name=self.name,
                        reason='An error occurred (%s): %s'
                        % (
                            response['Error'].get('Code', 'Unknown'),
                            response['Error'].get('Message', 'Unknown'),
                        ),
                        last_response=response,
                    )
            if current_state == 'success':
                logger.debug(
                    "Waiting complete, waiter matched the " "success state."
                )
                return
            if current_state == 'failure':
                reason = 'Waiter encountered a terminal failure state: %s' % (
                    acceptor.explanation
                )
>               raise WaiterError(
                    name=self.name,
                    reason=reason,
                    last_response=response,
                )
E               botocore.exceptions.WaiterError: Waiter StackCreateComplete failed: Waiter encountered a terminal failure state: For expression "Stacks[].StackStatus" we matched expected path: "CREATE_FAILED" at least once

.venv/lib/python3.11/site-packages/botocore/waiter.py:375: WaiterError
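
The StackCreateComplete waiter gives up here because its failure acceptor matches CREATE_FAILED on the Stacks[].StackStatus path of the DescribeStacks response. A rough, illustrative equivalent of that acceptor check (using an assumed, trimmed response shape) looks like this:

import jmespath

# trimmed, assumed shape of a DescribeStacks response, for illustration only
response = {"Stacks": [{"StackName": "BookstoreStack", "StackStatus": "CREATE_FAILED"}]}

# the waiter's "pathAny" failure acceptor: any stack status equal to CREATE_FAILED
statuses = jmespath.search("Stacks[].StackStatus", response)
if "CREATE_FAILED" in statuses:
    raise RuntimeError(f"stack reached a terminal failure state: {statuses}")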

The above exception was the direct cause of the following exception:

self = <tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication object at 0x7fc18db8e090>
aws_client = <localstack.aws.connect.ServiceLevelClientFactory object at 0x7fc18473b2d0>
infrastructure_setup = <function infrastructure_setup.<locals>._infrastructure_setup at 0x7fc184348cc0>
patch_opensearch_strategy = <_pytest.monkeypatch.MonkeyPatch object at 0x7fc17cfbe050>

    @pytest.fixture(scope="class", autouse=True)
    def infrastructure(self, aws_client, infrastructure_setup, patch_opensearch_strategy):
        infra = infrastructure_setup("Bookstore")
    
        search_book_fn_path = os.path.join(os.path.dirname(__file__), "functions/search.py")
        search_update_fn_path = os.path.join(
            os.path.dirname(__file__), "functions/update_search_cluster.py"
        )
        # custom provisioning
        additional_packages = ["requests", "requests-aws4auth", "urllib3==1.26.6"]
        asset_bucket = infra.get_asset_bucket()
        infra.add_custom_setup_provisioning_step(
            lambda: load_python_lambda_to_s3(
                aws_client.s3,
                bucket_name=asset_bucket,
                key_name=SEARCH_KEY,
                code_path=search_book_fn_path,
                additional_python_packages=additional_packages,
            )
        )
        infra.add_custom_setup_provisioning_step(
            lambda: load_python_lambda_to_s3(
                aws_client.s3,
                bucket_name=asset_bucket,
                key_name=SEARCH_UPDATE_KEY,
                code_path=search_update_fn_path,
                additional_python_packages=additional_packages,
            )
        )
    
        # CDK-based provisioning
        stack = cdk.Stack(infra.cdk_app, "BookstoreStack")
        books_api = BooksApi(
            stack,
            "BooksApi",
            search_key=SEARCH_KEY,
            search_update_key=SEARCH_UPDATE_KEY,
        )
    
        cdk.CfnOutput(stack, "BooksTableName", value=books_api.books_table.table_name)
        cdk.CfnOutput(stack, "SearchDomain", value=books_api.opensearch_domain.domain_endpoint)
        cdk.CfnOutput(stack, "SearchDomainName", value=books_api.opensearch_domain.domain_name)
        cdk.CfnOutput(stack, "GetBooksFn", value=books_api.get_book_fn.function_name)
        cdk.CfnOutput(stack, "ListBooksFn", value=books_api.list_books_fn.function_name)
        cdk.CfnOutput(stack, "InitBooksTableFn", value=books_api.load_books_helper_fn.function_name)
        cdk.CfnOutput(stack, "SearchForBooksFn", value=books_api.search_book_fn.function_name)
    
        # set skip_teardown=True to prevent the stack from being deleted
>       with infra.provisioner(skip_teardown=False) as prov:

../localstack/tests/aws/scenario/bookstore/test_bookstore.py:110: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/contextlib.py:137: in __enter__
    return next(self.gen)
../localstack/localstack-core/localstack/testing/scenario/provisioning.py:128: in provisioner
    self.provision(skip_deployment=skip_deployment)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <localstack.testing.scenario.provisioning.InfraProvisioner object at 0x7fc16ef86410>
skip_deployment = False

    def provision(self, skip_deployment: Optional[bool] = False):
        """
        Execute all previously added custom provisioning steps and deploy added CDK stacks via CloudFormation.
    
        Already deployed stacks will be updated instead.
        """
        self._synth()
        if skip_deployment:
            LOG.debug("Skipping deployment. Assuming stacks have already been created")
            return
    
        is_update = False
    
        if all(
            self._is_stack_deployed(stack_name, stack)
            for stack_name, stack in self.cloudformation_stacks.items()
        ):
            LOG.debug("All stacks are already deployed. Skipping the provisioning.")
            # TODO: in localstack we might want to do a delete/create
            #  but generally this won't be a common use case when developing against LocalStack
            is_update = True
    
        self._bootstrap()
        self._run_manual_setup_tasks()
        for stack_name, stack in self.cloudformation_stacks.items():
            change_set_name = f"test-cs-{short_uid()}"
            if len(stack["Template"]) > CFN_MAX_TEMPLATE_SIZE:
                # if the template size is too big, we need to upload it to s3 first
                # and use TemplateURL instead to point to the template in s3
                template_bucket_name = self._template_bucket_name()
                self._create_bucket_if_not_exists(template_bucket_name)
                key = f"{stack_name}.yaml"
                self.aws_client.s3.put_object(
                    Bucket=template_bucket_name, Key=key, Body=stack["Template"]
                )
                url = self.aws_client.s3.generate_presigned_url(
                    ClientMethod="get_object",
                    Params={"Bucket": template_bucket_name, "Key": key},
                    ExpiresIn=10,
                )
    
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateURL=url,
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            else:
                change_set = self.aws_client.cloudformation.create_change_set(
                    StackName=stack_name,
                    ChangeSetName=change_set_name,
                    TemplateBody=stack["Template"],
                    ChangeSetType="UPDATE" if is_update else "CREATE",
                    Capabilities=[
                        Capability.CAPABILITY_AUTO_EXPAND,
                        Capability.CAPABILITY_IAM,
                        Capability.CAPABILITY_NAMED_IAM,
                    ],
                )
            stack_id = self.cloudformation_stacks[stack_name]["StackId"] = change_set["StackId"]
            try:
                self.aws_client.cloudformation.get_waiter("change_set_create_complete").wait(
                    ChangeSetName=change_set["Id"],
                    WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                )
            except WaiterError:
                # it's OK if we don't have any updates to perform here (!)
                # there is no specific error code unfortunately
                if not is_update:
                    raise
                else:
                    LOG.warning(
                        "Execution of change set %s failed. Assuming no changes detected.",
                        change_set_name,
                    )
            else:
                self.aws_client.cloudformation.execute_change_set(ChangeSetName=change_set["Id"])
                try:
                    self.aws_client.cloudformation.get_waiter(
                        "stack_update_complete" if is_update else "stack_create_complete"
                    ).wait(
                        StackName=stack_id,
                        WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
                    )
    
                except WaiterError as e:
>                   raise StackDeployError(
                        self.aws_client.cloudformation.describe_stacks(StackName=stack_id)[
                            "Stacks"
                        ][0],
                        self.aws_client.cloudformation.describe_stack_events(StackName=stack_id)[
                            "StackEvents"
                        ],
                    ) from e
E                   localstack.testing.pytest.fixtures.StackDeployError: Describe output:
E                   {"StackId": "arn:aws:cloudformation:us-east-1:000000000000:stack/BookstoreStack/3a123e56", "StackName": "BookstoreStack", "ChangeSetId": "arn:aws:cloudformation:us-east-1:000000000000:changeSet/test-cs-a557bc82/c583d8db", "CreationTime": "2024-06-27T07:38:02.701Z", "LastUpdatedTime": "2024-06-27T07:38:02.701Z", "RollbackConfiguration": {}, "StackStatus": "CREATE_FAILED", "StackStatusReason": "An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1", "DisableRollback": false, "NotificationARNs": [], "Capabilities": ["CAPABILITY_AUTO_EXPAND", "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], "Tags": [], "EnableTerminationProtection": false, "DriftInformation": {"StackDriftStatus": "NOT_CHECKED"}}
E                   Failing resources:
E                   - Domain66AC69E0 (AWS::OpenSearchService::Domain) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)
E                   - BookstoreStack (AWS::CloudFormation::Stack) -> CREATE_FAILED (An error occurred (ResourceAlreadyExistsException) when calling the CreateDomain operation: domain bookstorestack-domain66ac69e already exists in region us-east-1)

../localstack/localstack-core/localstack/testing/scenario/provisioning.py:225: StackDeployError
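
The "Failing resources" list in the error above is derived from the describe_stacks / describe_stack_events output that the code passes to StackDeployError. A hedged sketch of how such a summary can be reproduced by hand (the client construction and credentials are assumptions for a local LocalStack run):

import boto3

# assumed LocalStack endpoint and dummy credentials
cfn = boto3.client(
    "cloudformation",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

stack = "BookstoreStack"  # a stack name or the full stack ARN from the log above
for event in cfn.describe_stack_events(StackName=stack)["StackEvents"]:
    if event["ResourceStatus"].endswith("_FAILED"):
        print(
            f"- {event['LogicalResourceId']} ({event['ResourceType']}) "
            f"-> {event['ResourceStatus']} "
            f"({event.get('ResourceStatusReason', 'no reason given')})"
        )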

Check warning on line 0 in tests.aws.services.firehose.test_firehose.TestFirehoseIntegration


github-actions / LocalStack Community integration with Pro

test_kinesis_firehose_elasticsearch_s3_backup (tests.aws.services.firehose.test_firehose.TestFirehoseIntegration) failed

pytest-junit-community-1.xml [took 7m 16s]
Raw output
assert False
 +  where False = poll_condition(<function TestFirehoseIntegration.test_kinesis_firehose_elasticsearch_s3_backup.<locals>.check_domain_state at 0x7fc0d806dda0>, 120, 1)
self = <tests.aws.services.firehose.test_firehose.TestFirehoseIntegration object at 0x7fc188545110>
s3_bucket = 'test-bucket-7b1d53d1'
kinesis_create_stream = <function kinesis_create_stream.<locals>._create_stream at 0x7fc0c5c2f2e0>
cleanups = [<function TestFirehoseIntegration.test_kinesis_firehose_elasticsearch_s3_backup.<locals>.<lambda> at 0x7fc0c1d8ba60>, <function TestFirehoseIntegration.test_kinesis_firehose_elasticsearch_s3_backup.<locals>.<lambda> at 0x7fc16e094fe0>]
aws_client = <localstack.aws.connect.ServiceLevelClientFactory object at 0x7fc18473b2d0>
account_id = '000000000000'

    @markers.skip_offline
    @markers.aws.unknown
    def test_kinesis_firehose_elasticsearch_s3_backup(
        self,
        s3_bucket,
        kinesis_create_stream,
        cleanups,
        aws_client,
        account_id,
    ):
        domain_name = f"test-domain-{short_uid()}"
        stream_name = f"test-stream-{short_uid()}"
        role_arn = f"arn:aws:iam::{account_id}:role/Firehose-Role"
        delivery_stream_name = f"test-delivery-stream-{short_uid()}"
        es_create_response = aws_client.es.create_elasticsearch_domain(DomainName=domain_name)
        cleanups.append(lambda: aws_client.es.delete_elasticsearch_domain(DomainName=domain_name))
        es_url = f"http://{es_create_response['DomainStatus']['Endpoint']}"
        es_arn = es_create_response["DomainStatus"]["ARN"]
    
        # create s3 backup bucket arn
        bucket_arn = arns.s3_bucket_arn(s3_bucket)
    
        # create kinesis stream
        kinesis_create_stream(StreamName=stream_name, ShardCount=2)
        stream_info = aws_client.kinesis.describe_stream(StreamName=stream_name)
        stream_arn = stream_info["StreamDescription"]["StreamARN"]
    
        kinesis_stream_source_def = {
            "KinesisStreamARN": stream_arn,
            "RoleARN": role_arn,
        }
        elasticsearch_destination_configuration = {
            "RoleARN": role_arn,
            "DomainARN": es_arn,
            "IndexName": "activity",
            "TypeName": "activity",
            "S3BackupMode": "AllDocuments",
            "S3Configuration": {
                "RoleARN": role_arn,
                "BucketARN": bucket_arn,
            },
        }
        aws_client.firehose.create_delivery_stream(
            DeliveryStreamName=delivery_stream_name,
            DeliveryStreamType="KinesisStreamAsSource",
            KinesisStreamSourceConfiguration=kinesis_stream_source_def,
            ElasticsearchDestinationConfiguration=elasticsearch_destination_configuration,
        )
        cleanups.append(
            lambda: aws_client.firehose.delete_delivery_stream(DeliveryStreamName=stream_name)
        )
    
        # wait for delivery stream to be ready
        def check_stream_state():
            stream = aws_client.firehose.describe_delivery_stream(
                DeliveryStreamName=delivery_stream_name
            )
            return stream["DeliveryStreamDescription"]["DeliveryStreamStatus"] == "ACTIVE"
    
        assert poll_condition(check_stream_state, 45, 1)
    
        # wait for ES cluster to be ready
        def check_domain_state():
            result = aws_client.es.describe_elasticsearch_domain(DomainName=domain_name)
            return not result["DomainStatus"]["Processing"]
    
        # if ElasticSearch is not yet installed, it might take some time to download the package before starting the domain
>       assert poll_condition(check_domain_state, 120, 1)
E       assert False
E        +  where False = poll_condition(<function TestFirehoseIntegration.test_kinesis_firehose_elasticsearch_s3_backup.<locals>.check_domain_state at 0x7fc0d806dda0>, 120, 1)

../localstack/tests/aws/services/firehose/test_firehose.py:205: AssertionError
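
Here the Elasticsearch domain is still in its Processing phase after the 120-second budget, so poll_condition returns False and the assertion fails. The helper below is a generic re-implementation of that polling pattern, not LocalStack's actual poll_condition; the longer timeout in the commented usage is purely illustrative.

import time


def poll(predicate, timeout: float, interval: float) -> bool:
    """Retry `predicate` until it returns True or `timeout` seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False


# usage, assuming `es_client` and `domain_name` as in the test above:
# ready = poll(
#     lambda: not es_client.describe_elasticsearch_domain(DomainName=domain_name)
#     ["DomainStatus"]["Processing"],
#     timeout=300,  # longer than the failing 120s budget, purely illustrative
#     interval=5,
# )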