diff --git a/.github/workflows/aws-tests.yml b/.github/workflows/aws-tests.yml
index 8a6b69bc9ced6..d67f5f59c18e4 100644
--- a/.github/workflows/aws-tests.yml
+++ b/.github/workflows/aws-tests.yml
@@ -256,6 +256,34 @@ jobs:
             .coverage.unit
           retention-days: 30
 
+  publish-preflight-test-results:
+    name: Publish Preflight- & Unit-Test Results
+    needs: test-preflight
+    runs-on: ubuntu-latest
+    permissions:
+      checks: write
+      pull-requests: write
+      contents: read
+      issues: read
+    # execute on success or failure, but not if the workflow is cancelled or any of the dependencies has been skipped
+    if: always() && !cancelled() && !contains(needs.*.result, 'skipped')
+    steps:
+      - name: Download Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: test-results-preflight
+
+      - name: Publish Preflight- & Unit-Test Results
+        uses: EnricoMi/publish-unit-test-result-action@v2
+        if: success() || failure()
+        with:
+          files: |
+            test-results-preflight/*.xml
+          check_name: "Test Results ${{ inputs.testAWSAccountId != '000000000000' && '(MA/MR) ' || ''}}- Preflight, Unit"
+          test_file_prefix: "-/opt/code/localstack/"
+          action_fail_on_inconclusive: true
+
+
   test-integration:
     name: "Integration Tests (${{ contains(matrix.runner, 'arm') && 'ARM64' || 'AMD64' }} - ${{ matrix.group }})"
     if: ${{ !inputs.onlyAcceptanceTests }}
@@ -404,6 +432,54 @@ jobs:
             .coverage.bootstrap
           retention-days: 30
 
+  publish-test-results:
+    name: Publish Test Results
+    strategy:
+      matrix:
+        runner:
+          - ubuntu-latest
+          - ubuntu-24.04-arm
+        exclude:
+          # skip publishing the ARM integration test results if we are not on the master or upgrade-dependencies branch and forceARMTests is not set to true
+          - runner: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'ubuntu-24.04-arm' || ''}}
+    needs:
+      - test-integration
+      - test-bootstrap
+    runs-on: ubuntu-latest
+    permissions:
+      checks: write
+      pull-requests: write
+      contents: read
+      issues: read
+    # execute on success or failure, but not if the workflow is cancelled or any of the dependencies has been skipped
+    if: always() && !cancelled() && !contains(needs.*.result, 'skipped')
+    steps:
+      - name: Determine Runner Architecture
+        shell: bash
+        run: echo "PLATFORM=${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_ENV
+
+      - name: Download Bootstrap Artifacts
+        uses: actions/download-artifact@v4
+        if: ${{ env.PLATFORM == 'amd64' }}
+        with:
+          pattern: test-results-bootstrap
+
+      - name: Download Integration Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: test-results-integration-${{ env.PLATFORM }}-*
+
+      - name: Publish Bootstrap and Integration Test Results
+        uses: EnricoMi/publish-unit-test-result-action@v2
+        if: success() || failure()
+        with:
+          files: |
+            **/pytest-junit-*.xml
+          check_name: "Test Results (${{ env.PLATFORM }}${{ inputs.testAWSAccountId != '000000000000' && ', MA/MR' || ''}}) - Integration${{ env.PLATFORM == 'amd64' && ', Bootstrap' || ''}}"
+          test_file_prefix: "-/opt/code/localstack/"
+          action_fail_on_inconclusive: true
+
+
   test-acceptance:
     name: "Acceptance Tests (${{ contains(matrix.runner, 'arm') && 'ARM64' || 'AMD64' }}"
     needs:
@@ -479,6 +555,46 @@ jobs:
             target/.coverage.acceptance-${{ env.PLATFORM }}
           retention-days: 30
 
+  publish-acceptance-test-results:
+    name: Publish Acceptance Test Results
+    strategy:
+      matrix:
+        runner:
+          - ubuntu-latest
+          - ubuntu-24.04-arm
+        exclude:
+          # skip publishing the ARM acceptance test results if we are not on the master or upgrade-dependencies branch and forceARMTests is not set to true
+          - runner: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'ubuntu-24.04-arm' || ''}}
+    needs:
+      - test-acceptance
+    runs-on: ubuntu-latest
+    permissions:
+      checks: write
+      pull-requests: write
+      contents: read
+      issues: read
+    # execute on success or failure, but not if the workflow is cancelled or any of the dependencies has been skipped
+    if: always() && !cancelled() && !contains(needs.*.result, 'skipped')
+    steps:
+      - name: Determine Runner Architecture
+        shell: bash
+        run: echo "PLATFORM=${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_ENV
+
+      - name: Download Acceptance Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: test-results-acceptance-${{ env.PLATFORM }}
+
+      - name: Publish Acceptance Test Results
+        uses: EnricoMi/publish-unit-test-result-action@v2
+        if: success() || failure()
+        with:
+          files: |
+            **/pytest-junit-*.xml
+          check_name: "Test Results (${{ env.PLATFORM }}${{ inputs.testAWSAccountId != '000000000000' && ', MA/MR' || ''}}) - Acceptance"
+          test_file_prefix: "-/opt/code/localstack/"
+          action_fail_on_inconclusive: true
+
   test-cloudwatch-v1:
     name: Test CloudWatch V1
     if: ${{ !inputs.onlyAcceptanceTests }}
@@ -669,6 +785,52 @@ jobs:
             ${{ env.JUNIT_REPORTS_FILE }}
           retention-days: 30
 
+  publish-alternative-provider-test-results:
+    name: Publish Alternative Provider Test Results
+    needs:
+      - test-cfn-v2-engine
+      - test-events-v1
+      - test-ddb-v2
+      - test-cloudwatch-v1
+    runs-on: ubuntu-latest
+    permissions:
+      checks: write
+      pull-requests: write
+      contents: read
+      issues: read
+    # execute on success or failure, but not if the workflow is cancelled or any of the dependencies has been skipped
+    if: always() && !cancelled() && !contains(needs.*.result, 'skipped')
+    steps:
+      - name: Download Cloudformation v2 Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: test-results-cloudformation-v2
+
+      - name: Download Events v1 Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: test-results-events-v1
+
+      - name: Download DynamoDB v2 Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: test-results-dynamodb-v2
+
+      - name: Download CloudWatch v1 Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: test-results-cloudwatch-v1
+
+      - name: Publish Alternative Provider Test Results
+        uses: EnricoMi/publish-unit-test-result-action@v2
+        if: success() || failure()
+        with:
+          files: |
+            **/pytest-junit-*.xml
+          check_name: "Test Results ${{ inputs.testAWSAccountId != '000000000000' && '(MA/MR) ' || ''}}- Alternative Providers"
+          test_file_prefix: "-/opt/code/localstack/"
+          action_fail_on_inconclusive: true
+
   capture-not-implemented:
     name: "Capture Not Implemented"
     if: ${{ !inputs.onlyAcceptanceTests && github.ref == 'refs/heads/master' }}
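Note on the artifact contract: the publish jobs above only download and publish reports; the matching upload steps presumably live in the test jobs themselves (the context lines around each hunk show the tail end of such archive steps). A producer has to upload an artifact whose name matches the download `pattern:` and whose contents match the `files:` glob (`pytest-junit-*.xml` resp. `*.xml`). A minimal sketch of what that could look like for the preflight job, with assumed step name and report path rather than lines from this diff:

      - name: Archive Test Results
        if: success() || failure()              # keep reports from failed runs so they can still be published
        uses: actions/upload-artifact@v4
        with:
          name: test-results-preflight          # must match the download pattern in publish-preflight-test-results
          path: target/reports/*.xml            # assumed location of the pytest JUnit XML report
          retention-days: 30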
diff --git a/Makefile b/Makefile
index 42dc61324faed..114853d7bc399 100644
--- a/Makefile
+++ b/Makefile
@@ -93,7 +93,7 @@ start: ## Manually start the local infrastructure for testing
 docker-run-tests: ## Initializes the test environment and runs the tests in a docker container
 	docker run -e LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC=1 --entrypoint= -v `pwd`/.git:/opt/code/localstack/.git -v `pwd`/requirements-test.txt:/opt/code/localstack/requirements-test.txt -v `pwd`/tests/:/opt/code/localstack/tests/ -v `pwd`/dist/:/opt/code/localstack/dist/ -v `pwd`/target/:/opt/code/localstack/target/ -v /var/run/docker.sock:/var/run/docker.sock -v /tmp/localstack:/var/lib/localstack \
 		$(IMAGE_NAME):$(DEFAULT_TAG) \
-	    bash -c "make install-test && DEBUG=$(DEBUG) PYTEST_LOGLEVEL=$(PYTEST_LOGLEVEL) PYTEST_ARGS='$(PYTEST_ARGS)' COVERAGE_FILE='$(COVERAGE_FILE)' TEST_PATH='$(TEST_PATH)' LAMBDA_IGNORE_ARCHITECTURE=1 LAMBDA_INIT_POST_INVOKE_WAIT_MS=50 TINYBIRD_PYTEST_ARGS='$(TINYBIRD_PYTEST_ARGS)' TINYBIRD_DATASOURCE='$(TINYBIRD_DATASOURCE)' TINYBIRD_TOKEN='$(TINYBIRD_TOKEN)' TINYBIRD_URL='$(TINYBIRD_URL)' CI_REPOSITORY_NAME='$(CI_REPOSITORY_NAME)' CI_WORKFLOW_NAME='$(CI_WORKFLOW_NAME)' CI_COMMIT_BRANCH='$(CI_COMMIT_BRANCH)' CI_COMMIT_SHA='$(CI_COMMIT_SHA)' CI_JOB_URL='$(CI_JOB_URL)' CI_JOB_NAME='$(CI_JOB_NAME)' CI_JOB_ID='$(CI_JOB_ID)' CI='$(CI)' TEST_AWS_REGION_NAME='${TEST_AWS_REGION_NAME}' TEST_AWS_ACCESS_KEY_ID='${TEST_AWS_ACCESS_KEY_ID}' TEST_AWS_ACCOUNT_ID='${TEST_AWS_ACCOUNT_ID}' make test-coverage"
+	    bash -c "make install-test && DEBUG=$(DEBUG) PYTEST_LOGLEVEL=$(PYTEST_LOGLEVEL) PYTEST_ARGS='$(PYTEST_ARGS)' COVERAGE_FILE='$(COVERAGE_FILE)' JUNIT_REPORTS_FILE=$(JUNIT_REPORTS_FILE) TEST_PATH='$(TEST_PATH)' LAMBDA_IGNORE_ARCHITECTURE=1 LAMBDA_INIT_POST_INVOKE_WAIT_MS=50 TINYBIRD_PYTEST_ARGS='$(TINYBIRD_PYTEST_ARGS)' TINYBIRD_DATASOURCE='$(TINYBIRD_DATASOURCE)' TINYBIRD_TOKEN='$(TINYBIRD_TOKEN)' TINYBIRD_URL='$(TINYBIRD_URL)' CI_REPOSITORY_NAME='$(CI_REPOSITORY_NAME)' CI_WORKFLOW_NAME='$(CI_WORKFLOW_NAME)' CI_COMMIT_BRANCH='$(CI_COMMIT_BRANCH)' CI_COMMIT_SHA='$(CI_COMMIT_SHA)' CI_JOB_URL='$(CI_JOB_URL)' CI_JOB_NAME='$(CI_JOB_NAME)' CI_JOB_ID='$(CI_JOB_ID)' CI='$(CI)' TEST_AWS_REGION_NAME='${TEST_AWS_REGION_NAME}' TEST_AWS_ACCESS_KEY_ID='${TEST_AWS_ACCESS_KEY_ID}' TEST_AWS_ACCOUNT_ID='${TEST_AWS_ACCOUNT_ID}' make test-coverage"
 
 docker-run-tests-s3-only: ## Initializes the test environment and runs the tests in a docker container for the S3 only image
 	# TODO: We need node as it's a dependency of the InfraProvisioner at import time, remove when we do not need it anymore
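The Makefile change only forwards `JUNIT_REPORTS_FILE` into the in-container `make test-coverage` invocation, so callers can request a pytest JUnit report at a path of their choosing and archive it afterwards. Since make imports environment variables as make variables, a CI step could drive this roughly as in the sketch below; the report path is an assumption, as is the expectation that the inner test target translates `JUNIT_REPORTS_FILE` into pytest's `--junitxml` option:

      - name: Run Tests in Docker
        run: make docker-run-tests               # picks up JUNIT_REPORTS_FILE via $(JUNIT_REPORTS_FILE)
        env:
          JUNIT_REPORTS_FILE: target/reports/pytest-junit-integration-amd64.xml   # assumed name; real jobs parameterize per platform/group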