diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json deleted file mode 100644 index bd03255..0000000 --- a/.devcontainer/devcontainer.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name": "Go", - "image": "mcr.microsoft.com/devcontainers/go:0-1-bullseye", - "postCreateCommand": "go run .", - "forwardPorts": [], - "customizations": { - "codespaces": { - "openFiles": [ - "README.http" - ] - } - } -} diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index caa149d..0000000 --- a/.editorconfig +++ /dev/null @@ -1,14 +0,0 @@ -root = true - -[*] -end_of_line = lf -insert_final_newline = true -indent_style = tab -indent_size = 2 -trim_trailing_whitespace = true -charset = utf-8 - -[*.yaml] -indent_style = space -ij_yaml_spaces_within_braces = false -ij_yaml_spaces_within_brackets = false diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 15c3cb1..0000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 -updates: -- package-ecosystem: "github-actions" - directory: "/" - schedule: - interval: "weekly" -- package-ecosystem: "gomod" - directory: / - schedule: - interval: "weekly" - diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index de04cb8..0000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,70 +0,0 @@ -name: ci - -on: - push: - branches: - - 'main' - tags: - - 'v*' - pull_request: - branches: - - 'main' - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - extension-ci: - uses: steadybit/extension-kit/.github/workflows/reusable-extension-ci.yml@main # NOSONAR githubactions:S7637 - our own action - with: - go_version: '1.24' - runs_on: steadybit_runner_ubuntu_latest_4cores_16GB - build_linux_packages: true - run_make_prepare_audit: true - VERSION_BUMPER_APPID: ${{ vars.GH_APP_STEADYBIT_APP_ID }} - secrets: - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - PAT_TOKEN_EXTENSION_DEPLOYER: ${{ secrets.PAT_TOKEN_EXTENSION_DEPLOYER }} - MAVEN_GPG_PRIVATE_KEY: ${{ secrets.MAVEN_GPG_PRIVATE_KEY }} - MAVEN_GPG_PRIVATE_KEY_PASSWORD: ${{ secrets.MAVEN_GPG_PRIVATE_KEY_PASSWORD }} - PAT_TOKEN_GORELEASER: ${{ secrets.PAT_TOKEN }} - SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} - VERSION_BUMPER_SECRET: ${{ secrets.GH_APP_STEADYBIT_PRIVATE_KEY }} - GCP_ARTIFACT_REGISTRY_IDENTITY_PROVIDER: ${{ secrets.GCP_ARTIFACT_REGISTRY_IDENTITY_PROVIDER }} - - build-consumer-image: - name: Build Dummy Consumer Image - runs-on: ubuntu-latest - timeout-minutes: 60 - permissions: - contents: read - packages: write - steps: - - name: Checkout repository - uses: actions/checkout@v5 - with: - fetch-depth: 0 - - - name: Log in to the container registry - uses: docker/login-action@v3 # NOSONAR githubactions:S7637 - verified action creator - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f - with: - images: ghcr.io/${{ github.repository }}/dummyconsumer - - - name: Build and push Docker image - id: push - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 - with: - context: ./test-dataset/dummyconsumer/ - file: ./test-dataset/dummyconsumer/Dockerfile - push: true - tags: ghcr.io/${{ github.repository }}/dummyconsumer:main - labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml deleted file mode 
100644 index 6830199..0000000 --- a/.github/workflows/cla.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: "CLA Assistant" -on: - issue_comment: - types: [created] - pull_request_target: - types: [opened,closed,synchronize] - -jobs: - call-workflow: - uses: steadybit/.github/.github/workflows/cla.yml@main # NOSONAR githubactions:S7637 - our own action - secrets: - PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN_USED_BY_CLA_FROM_ANSGAR }} diff --git a/.github/workflows/dependabot-auto-merge.yml b/.github/workflows/dependabot-auto-merge.yml deleted file mode 100644 index 7ebfc26..0000000 --- a/.github/workflows/dependabot-auto-merge.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Dependabot auto-merge -on: pull_request - -permissions: - contents: write - pull-requests: write - -jobs: - dependabot: - runs-on: ubuntu-latest - if: ${{ github.actor == 'dependabot[bot]' }} - steps: - - name: Dependabot metadata - id: metadata - uses: dependabot/fetch-metadata@v2 # NOSONAR githubactions:S7637 - verified action creator - with: - github-token: "${{ secrets.GITHUB_TOKEN }}" - - name: Enable auto-merge for Dependabot PRs - if: ${{steps.metadata.outputs.package-ecosystem == 'github_actions' || (steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch')}} - run: gh pr merge --auto --merge "$PR_URL" - env: - PR_URL: ${{github.event.pull_request.html_url}} - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/.github/workflows/ghcr-cleanup.yml b/.github/workflows/ghcr-cleanup.yml deleted file mode 100644 index 0106770..0000000 --- a/.github/workflows/ghcr-cleanup.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: ghcr-cleanup - -on: - workflow_dispatch: { } - schedule: - - cron: "0 8 * * 1" - -jobs: - ghcr-cleanup: - uses: steadybit/extension-kit/.github/workflows/reusable-ghcr-cleanup.yml@main # NOSONAR githubactions:S7637 - our own action - secrets: - token: ${{ secrets.GHCR_CLEANUP_PAT }} diff --git a/.gitignore b/.gitignore deleted file mode 100644 index cb16b49..0000000 --- a/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -/extension -/coverage.out -/e2e/e2e-coverage-docker.out -/gpg.key -/dist -/licenses -/snyk.sarif -.idea -.env -/pkg diff --git a/.gitpod.yml b/.gitpod.yml deleted file mode 100644 index 47486c4..0000000 --- a/.gitpod.yml +++ /dev/null @@ -1,16 +0,0 @@ -# see https://www.gitpod.io/docs/references/gitpod-yml -tasks: - - name: Setup & Build - before: go mod download - init: make build - command: make run - -ports: - - port: 8081 - onOpen: notify - name: Extension - description: Extension HTTP interface - -vscode: - extensions: - - "anweber.vscode-httpyac" diff --git a/.goreleaser.yaml b/.goreleaser.yaml deleted file mode 100644 index 97e4dbf..0000000 --- a/.goreleaser.yaml +++ /dev/null @@ -1,84 +0,0 @@ -project_name: steadybit-extension-kafka -version: 2 -before: - hooks: - - go mod download - - make licenses-report - -release: - prerelease: "false" - -git: - ignore_tags: - - steadybit-extension-kafka-* - -builds: - - binary: extension-kafka - env: - - CGO_ENABLED=0 - goos: - - linux - goarch: - - amd64 - - arm64 - flags: - - -cover={{ if index .Env "BUILD_WITH_COVERAGE" }}{{ .Env.BUILD_WITH_COVERAGE }}{{ else }}false{{ end }} - - -covermode=atomic - ldflags: - - -s -w - - -X github.com/steadybit/extension-kit/extbuild.ExtensionName={{.ProjectName}} - - -X github.com/steadybit/extension-kit/extbuild.Version={{.Version}} - - -X github.com/steadybit/extension-kit/extbuild.Revision={{.Commit}} - -archives: - - 
name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}" - -checksum: - name_template: 'checksums.txt' -snapshot: - version_template: "{{ incpatch .Version }}-{{ .Timestamp }}-next" -changelog: - sort: asc - filters: - exclude: - - '^docs:' - - '^test:' - -nfpms: - - package_name: "steadybit-extension-kafka" - file_name_template: "{{ .ConventionalFileName }}" - formats: - - deb - - rpm - maintainer: "Antoine Choimet " - description: | - Steadybit Extension Kafka - vendor: "steadybit GmbH" - homepage: "https://steadybit.com" - license: "Steadybit license" - builds: - - steadybit-extension-kafka - bindir: /opt/steadybit/extension-kafka - contents: - - src: ./linuxpkg/systemd - dst: /usr/lib/systemd/system - - src: ./linuxpkg/init.d - dst: /etc/init.d - - src: ./linuxpkg/config - dst: /etc - type: config - - src: ./licenses - dst: /opt/steadybit/extension-kafka/licenses - - scripts: - preinstall: ./linuxpkg/scripts/preinstall.sh - postinstall: ./linuxpkg/scripts/postinstall.sh - preremove: ./linuxpkg/scripts/preremove.sh - postremove: ./linuxpkg/scripts/postremove.sh - - rpm: - signature: - key_file: ./gpg.key - deb: - signature: - key_file: ./gpg.key diff --git a/.vscode/extensions.json b/.vscode/extensions.json deleted file mode 100644 index d3c7a24..0000000 --- a/.vscode/extensions.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "recommendations": ["anweber.vscode-httpyac"] -} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index d96c729..0000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,68 +0,0 @@ -# Changelog - -## v1.2.4 - -- Support changing IO and network thread count values with huge increments or decrements -- Update dependencies - -## v1.2.3 - -- Bump app version - -## v1.2.2 - -- Bump app version - -## v1.2.1 - -- Add cluster name to topic and consumergroup targets - -## v1.2.0 - -- Add cluster name to broker target attributes -- Better target ID for brokers in case of multiple clusters -- Add min/max validations -- Update dependencies - -## v1.1.1 - -- Make extension-kafka compatible with AWS MSK SCRAM-SHA-512 Auth -- Add TLS for compatibility with SASL_SSL security protocol -- Update to go 1.24 -- Update dependencies - -## v1.1.0 - -- Fix log line for check error -- Change metric colors behavior -- Change name of kafka config for certs - -## v1.0.9 - -- Fix log line for check error -- Fix metric ID for broker check - -## v1.0.8 - -- Add pod and container enrichment - -## v1.0.7 - -- Fix action ID - -## v1.0.6 - -- Add controller information to target attributes -- Add new broker check -- Add TLS connection support -- Update dependencies - -## v1.0.5 - -- Use uid instead of name for user statement in Dockerfile -- Fix data race issue -- Update dependencies - -## v1.0.0 - - - Initial release diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 6729f67..0000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,51 +0,0 @@ -# Contributing - -## Getting Started - -1. Clone the repository -2. `$ make tidy` -3. `$ make run` -4. `$ open http://localhost:8081` - -## Tasks - -The `Makefile` in the project root contains commands to easily run common admin tasks: - -| Command | Meaning | -|----------------|-------------------------------------------------------------------------------------------------------| -| `$ make tidy` | Format all code using `go fmt` and tidy the `go.mod` file. | -| `$ make audit` | Run `go vet`, `staticheck`, execute all tests and verify required modules. | -| `$ make build` | Build a binary for the extension. 
Creates a file called `extension` in the repository root directory. | -| `$ make run` | Build and then run the created binary. | - -## Releasing the Code/Docker Image - -To make a new release, do the following: - - 1. Update the `CHANGELOG.md` - 2. Commit and push the changelog changes. - 3. Set the tag `git tag -a vX.X.X -m vX.X.X` - 4. Push the tag. - -## Releasing Helm Chart Changes - - 1. Update the version number in the [Chart.yaml](./charts/steadybit-extension-kafka/Chart.yaml) - 2. Commit and push the changes. - -Changing the Helm chart without bumping the version will result in the following error: - -``` -> Releasing charts... - Error: error creating GitHub release steadybit-extension-kafka-1.0.0: POST https://api.github.com/repos/steadybit/extension-kafka/releases: 422 Validation Failed [{Resource:Release Field:tag_name Code:already_exists Message:}] -``` - -## Contributor License Agreement (CLA) - -In order to accept your pull request, we need you to submit a CLA. You only need to do this once. If you are submitting a pull request for the first time, just submit a Pull Request and our CLA Bot will give you instructions on how to sign the CLA before merging your Pull Request. - -All contributors must sign an [Individual Contributor License Agreement](https://github.com/steadybit/.github/blob/main/.github/cla/individual-cla.md). - -If contributing on behalf of your company, your company must sign a [Corporate Contributor License Agreement](https://github.com/steadybit/.github/blob/main/.github/cla/corporate-cla.md). If so, please contact us via office@steadybit.com. - -If for any reason, your first contribution is in a PR created by other contributor, please just add a comment to the PR -with the following text to agree our CLA: "I have read the CLA Document and I hereby sign the CLA". diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 9bafb80..0000000 --- a/Dockerfile +++ /dev/null @@ -1,64 +0,0 @@ -# syntax=docker/dockerfile:1 - -## -## Build -## -FROM --platform=$BUILDPLATFORM golang:1.24-alpine AS build - -ARG TARGETOS -ARG TARGETARCH -ARG NAME -ARG VERSION -ARG REVISION -ARG ADDITIONAL_BUILD_PARAMS -ARG SKIP_LICENSES_REPORT=false -ARG VERSION=unknown -ARG REVISION=unknown - -WORKDIR /app - -RUN apk add build-base -COPY go.mod ./ -COPY go.sum ./ -RUN go mod download - -COPY . . 
- -RUN GOOS=$TARGETOS GOARCH=$TARGETARCH go build \ - -ldflags="\ - -X 'github.com/steadybit/extension-kit/extbuild.ExtensionName=${NAME}' \ - -X 'github.com/steadybit/extension-kit/extbuild.Version=${VERSION}' \ - -X 'github.com/steadybit/extension-kit/extbuild.Revision=${REVISION}'" \ - -o ./extension \ - ${ADDITIONAL_BUILD_PARAMS} -RUN make licenses-report - -## -## Runtime -## -FROM alpine:3.19 - -ARG VERSION=unknown -ARG REVISION=unknown - -LABEL "steadybit.com.discovery-disabled"="true" -LABEL "version"="${VERSION}" -LABEL "revision"="${REVISION}" -RUN echo "$VERSION" > /version.txt && echo "$REVISION" > /revision.txt - -ARG USERNAME=steadybit -ARG USER_UID=10000 - -RUN adduser -u $USER_UID -D $USERNAME - -USER $USER_UID - -WORKDIR / - -COPY --from=build /app/extension /extension -COPY --from=build /app/licenses /licenses - -EXPOSE 8083 -EXPOSE 8084 - -ENTRYPOINT ["/extension"] diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 57bac5b..0000000 --- a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) {{TODO YEAR}} {{TODO ORGANIZATION NAME}} - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Makefile b/Makefile deleted file mode 100755 index d9bad2a..0000000 --- a/Makefile +++ /dev/null @@ -1,110 +0,0 @@ -# ==================================================================================== # -# HELPERS -# ==================================================================================== # - -## help: print this help message -.PHONY: help -help: - @echo 'Usage:' - @sed -n 's/^##//p' ${MAKEFILE_LIST} | column -t -s ':' | sed -e 's/^/ /' - -## licenses-report: generate a report of all licenses -.PHONY: licenses-report -licenses-report: -ifeq ($(SKIP_LICENSES_REPORT), true) - @echo "Skipping licenses report" - rm -rf ./licenses && mkdir -p ./licenses -else - @echo "Generating licenses report" - rm -rf ./licenses - go run github.com/google/go-licenses@v1.6.0 save . --save_path ./licenses - go run github.com/google/go-licenses@v1.6.0 report . > ./licenses/THIRD-PARTY.csv - cp LICENSE ./licenses/LICENSE.txt -endif - -# ==================================================================================== # -# QUALITY CONTROL -# ==================================================================================== # - -## tidy: format code and tidy modfile -.PHONY: tidy -tidy: - go fmt ./... 
- go mod tidy -v - -## prepare_audit: install required kafkactl command for e2e tests, only intended for the CI runner -.PHONY: prepare_audit -prepare_audit: - wget https://github.com/deviceinsight/kafkactl/releases/download/v5.11.1/kafkactl_5.11.1_linux_amd64.deb - sudo dpkg -i kafkactl_5.11.1_linux_amd64.deb - -## audit: run quality control checks -.PHONY: audit -audit: - gofmt -l . - go vet ./... - go run honnef.co/go/tools/cmd/staticcheck@latest -checks=all,-SA1019,-ST1000,-ST1003,-U1000 ./... - go test -race -vet=off -coverprofile=coverage.out ./... - go mod verify - -## charttesting: Run Helm chart unit tests -.PHONY: charttesting -charttesting: - @set -e; \ - for dir in charts/steadybit-extension-*; do \ - echo "Unit Testing $$dir"; \ - helm unittest $$dir; \ - done - -## chartlint: Lint charts -.PHONY: chartlint -chartlint: - ct lint --config chartTesting.yaml - -## chart-bump-version: Bump the patch version and optionally set the appVersion -.PHONY: chart-bump-version -chart-bump-version: - @set -e; \ - for dir in charts/steadybit-extension-*; do \ - if [ ! -z "$(APP_VERSION)" ]; then \ - yq -i ".appVersion = strenv(APP_VERSION)" $$dir/Chart.yaml; \ - fi; \ - CHART_VERSION=$$(semver -i patch $$(yq '.version' $$dir/Chart.yaml)) \ - yq -i ".version = strenv(CHART_VERSION)" $$dir/Chart.yaml; \ - grep -e "^version:" -e "^appVersion:" $$dir/Chart.yaml; \ - done -# ==================================================================================== # -# BUILD -# ==================================================================================== # - -## build: build the extension -.PHONY: build -build: - go mod verify - go build -o=./extension - -## run: run the extension -.PHONY: run -run: tidy build - ./extension - -## container: build the container image -.PHONY: container -container: - docker build --build-arg ADDITIONAL_BUILD_PARAMS="-cover -covermode=atomic" --build-arg SKIP_LICENSES_REPORT="true" -t extension-kafka:latest . - -# ==================================================================================== # -# EJECT -# ==================================================================================== # - -## eject: remove / clear up files associated with the scaffold repository -.PHONY: eject -eject: - rm CHANGELOG.md - mv CHANGELOG.SCAFFOLD.md CHANGELOG.md - rm CONTRIBUTING.md - mv CONTRIBUTING.SCAFFOLD.md CONTRIBUTING.md - rm README.md - mv README.SCAFFOLD.md README.md - rm LICENSE - mv LICENSE.SCAFFOLD LICENSE diff --git a/README.md b/README.md deleted file mode 100755 index 0da65c7..0000000 --- a/README.md +++ /dev/null @@ -1,173 +0,0 @@ -# Steadybit extension-kafka - -A [Steadybit](https://www.steadybit.com/) extension to integrate [Kafka](https://kafka.apache.org/) into Steadybit. - -Learn about the capabilities of this extension in -our [Reliability Hub](https://hub.steadybit.com/extension/com.steadybit.extension_kafka). 
- -## Prerequisites - -The extension-kafka is using these capacities, thus may need elevated rights on kafka side : - -- List brokers / topics / consumer groups / offsets -- Elect leaders for partitions -- Alter broker configuration -- Create / Delete ACLs -- Delete Records - -## Configuration - -| Environment Variable | Helm value | Meaning | Required | Default | -|---------------------------------------------------------------------|------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------|----------|---------| -| `STEADYBIT_EXTENSION_SEED_BROKERS` | `kafka.seedBrokers` | Brokers hosts (without scheme) with port separated by comma (example: "localhost:9092,localhost:9093" | yes | | -| `STEADYBIT_EXTENSION_SASL_MECHANISM` | `kafka.auth.saslMechanism` | PLAIN, SCRAM-SHA-256, or SCRAM-SHA-512 | no | | -| `STEADYBIT_EXTENSION_SASL_USER` | `kafka.auth.saslUser` | Sasl User | no | | -| `STEADYBIT_EXTENSION_SASL_PASSWORD` | `kafka.auth.saslPassword` | Sasl Password | no | | -| `STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE` | `kafka.auth.kafkaClusterCertChainFile` | The client certificate in PEM format. | no | | -| `STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE` | `kafka.auth.kafkaClusterCertKeyFile` | The private key associated with the client certificate. | no | | -| `STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE` | `kafka.auth.kafkaClusterCaFile` | The Certificate Authority (CA) certificate in PEM format. | no | | -| `STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS` | `kafka.auth.useTLS` | Switch to "true" to use a TLS connection with default system certs, fill the certs fields above if you want to tune the tls connection. | no | | -| `STEADYBIT_EXTENSION_DISCOVERY_ATTRIBUTES_EXCLUDES_BROKERS` | `discovery.attributes.excludes.broker` | List of Broker Attributes which will be excluded during discovery. Checked by key equality and supporting trailing "*" | no | | -| `STEADYBIT_EXTENSION_DISCOVERY_ATTRIBUTES_EXCLUDES_TOPICS` | `discovery.attributes.excludes.topic` | List of Broker Attributes which will be excluded during discovery. Checked by key equality and supporting trailing "*" | no | | -| `STEADYBIT_EXTENSION_DISCOVERY_ATTRIBUTES_EXCLUDES_CONSUMER_GROUPS` | `discovery.attributes.excludes.consumer` | List of Broker Attributes which will be excluded during discovery. Checked by key equality and supporting trailing "*" | no | | - -The extension supports all environment variables provided -by [steadybit/extension-kit](https://github.com/steadybit/extension-kit#environment-variables). - -## Installation - -### Using Docker - -```sh -docker run \ - --rm \ - -p 8080 \ - --name steadybit-extension-kafka \ - --env STEADYBIT_EXTENSION_SEED_BROKERS="localhost:9092" \ - ghcr.io/steadybit/extension-kafka:latest -``` - -### Using Helm in Kubernetes - -```sh -helm repo add steadybit-extension-kafka https://steadybit.github.io/extension-kafka -helm repo update -helm upgrade steadybit-extension-kafka \ - --install \ - --wait \ - --timeout 5m0s \ - --create-namespace \ - --namespace steadybit-agent \ - --set kafka.seedBrokers="localhost:9092" \ - steadybit-extension-kafka/steadybit-extension-kafka -``` - -## Register the extension - -Make sure to register the extension on the Steadybit platform. Please refer to -the [documentation](https://docs.steadybit.com/integrate-with-steadybit/extensions/extension-installation) for more -information. 
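As a quick reference that ties the configuration table to the Docker installation above, here is a minimal sketch of a `docker run` invocation with SASL authentication over TLS. The environment variable names and the image come from the table and example above; the broker list and SCRAM credentials are placeholders, and port 8083 mirrors the port exposed by the Dockerfile and the Helm service.

```sh
# Sketch only: replace the broker list and the SCRAM credentials with real values.
docker run \
  --rm \
  -p 8083:8083 \
  --name steadybit-extension-kafka \
  --env STEADYBIT_EXTENSION_SEED_BROKERS="broker-1:9092,broker-2:9092" \
  --env STEADYBIT_EXTENSION_SASL_MECHANISM="SCRAM-SHA-512" \
  --env STEADYBIT_EXTENSION_SASL_USER="<sasl-user>" \
  --env STEADYBIT_EXTENSION_SASL_PASSWORD="<sasl-password>" \
  --env STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS="true" \
  ghcr.io/steadybit/extension-kafka:latest
```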
-
-## Generate pem files from truststore and keystore
-
-### Prerequisites
-
-- **Keystore file**: `kafka.keystore.jks` (contains the client certificate and private key).
-- **Truststore file**: `kafka.truststore.jks` (contains the CA certificate).
-- **Tools Required**: `keytool` and `openssl` must be installed.
-
----
-
-### Steps to Generate PEM Files
-
-1. **Export the CA Certificate (`ca-cert.pem`)**
-   Extract the CA certificate from the truststore using the following command:
-
-```bash
-keytool -exportcert \
-  -keystore kafka.truststore.jks \
-  -alias CARoot \
-  -storepass <truststore-password> \
-  -rfc -file ca-cert.pem
-```
-
-• Replace `<truststore-password>` with the password for the truststore.
-• The output file `ca-cert.pem` will contain the CA certificate in PEM format.
-
-2. **Convert the Keystore to PKCS12 Format**
-
-Convert the keystore to a PKCS12 file to facilitate extracting the certificate and private key:
-
-```bash
-keytool -importkeystore \
-  -srckeystore kafka.keystore.jks \
-  -srcstorepass <keystore-password> \
-  -srcalias kafka \
-  -destkeystore kafka-keystore.p12 \
-  -deststoretype PKCS12 \
-  -deststorepass <p12-password>
-```
-
-• Replace `<keystore-password>` with the password for the keystore.
-• Replace `<p12-password>` with a new password for the PKCS12 file.
-• This will generate the file `kafka-keystore.p12`, which contains both the client certificate and private key.
-
-3. **Extract the Private Key (`client-key.pem`)**
-   Use the following command to extract the private key from the PKCS12 file:
-
-```bash
-openssl pkcs12 -in kafka-keystore.p12 \
-  -nocerts -nodes -out client-key.pem \
-  -passin pass:<p12-password>
-```
-
-• Replace `<p12-password>` with the password set for the PKCS12 file.
-• This will generate the file `client-key.pem`, which contains the private key in PEM format.
-
-4. **Extract the Client Certificate (`client-cert.pem`)**
-   Use the following command to extract the client certificate from the PKCS12 file:
-
-```bash
-openssl pkcs12 -in kafka-keystore.p12 \
-  -clcerts -nokeys -out client-cert.pem \
-  -passin pass:<p12-password>
-```
-
-• Replace `<p12-password>` with the password set for the PKCS12 file.
-• This will generate the file `client-cert.pem`, which contains the client certificate in PEM format.
-
-5. **(Optional) Verifying the Generated PEM Files**
-
-```bash
-openssl x509 -in ca-cert.pem -text -noout
-openssl rsa -in client-key.pem -check
-openssl x509 -in client-cert.pem -text -noout
-```
-
-Ensure that:
-• The CA certificate includes the correct issuer and validity period.
-• The private key matches the client certificate.
-
-#### Issue: “Alias not found”
-
-Verify the contents of the keystore and truststore:
-
-```bash
-keytool -list -v -keystore kafka.keystore.jks -storepass <keystore-password>
-keytool -list -v -keystore kafka.truststore.jks -storepass <truststore-password>
-```
-
-#### Notes
-
-1. The private key (`client-key.pem`) must be kept secure. Unauthorized access to this file can compromise the client.
-2. Ensure the Kafka broker’s hostname or IP address matches the Subject Alternative Name (SAN) in the server’s certificate.
-3. Always use strong passwords for your keystore, truststore, and PKCS12 files.
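Following up on the verification step above, where the private key must match the client certificate, a minimal check (assuming an RSA key pair) is to compare the modulus digests of the two files; both commands should print the same value:

```bash
# Both commands should print the same MD5 digest if the key and certificate belong together.
openssl x509 -noout -modulus -in client-cert.pem | openssl md5
openssl rsa  -noout -modulus -in client-key.pem  | openssl md5
```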
- -## Version and Revision - -The version and revision of the extension: - -- are printed during the startup of the extension -- are added as a Docker label to the image -- are available via the `version.txt`/`revision.txt` files in the root of the image diff --git a/chartTesting.yaml b/chartTesting.yaml deleted file mode 100644 index 797e96d..0000000 --- a/chartTesting.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# See https://github.com/helm/chart-testing#configuration -remote: origin -target-branch: main -chart-dirs: - - charts -chart-repos: - - steadybit=https://steadybit.github.io/helm-charts -helm-extra-args: --timeout 600s diff --git a/charts/steadybit-extension-kafka/.helmignore b/charts/steadybit-extension-kafka/.helmignore deleted file mode 100644 index 5b6e763..0000000 --- a/charts/steadybit-extension-kafka/.helmignore +++ /dev/null @@ -1,24 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ -tests/ \ No newline at end of file diff --git a/charts/steadybit-extension-kafka/Chart.lock b/charts/steadybit-extension-kafka/Chart.lock deleted file mode 100644 index ff40f7e..0000000 --- a/charts/steadybit-extension-kafka/Chart.lock +++ /dev/null @@ -1,6 +0,0 @@ -dependencies: -- name: extensionlib - repository: https://steadybit.github.io/helm-charts - version: 1.4.7 -digest: sha256:9a7feb18ae4c94e12dc7788d867b188e8606dc40c32433702dc4e4995462dd55 -generated: "2025-01-14T16:48:26.871028+01:00" diff --git a/charts/steadybit-extension-kafka/Chart.yaml b/charts/steadybit-extension-kafka/Chart.yaml deleted file mode 100644 index 4cb9f3b..0000000 --- a/charts/steadybit-extension-kafka/Chart.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: v2 -name: steadybit-extension-kafka -description: Steadybit scaffold extension Helm chart for Kubernetes. 
-version: 1.0.26 -appVersion: v1.2.4 -home: https://www.steadybit.com/ -icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png -maintainers: - - email: antoine@steadybit.com - name: achoimet -sources: - - https://github.com/steadybit/extension-kafka -annotations: - artifacthub.io/images: | - - name: logo - image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png - artifacthub.io/links: |- - - name: Steadybit website - url: https://www.steadybit.com - - name: Steadybit reliability hub - url: https://hub.steadybit.com -dependencies: - - name: extensionlib - version: ^1.4.4 - repository: https://steadybit.github.io/helm-charts diff --git a/charts/steadybit-extension-kafka/charts/extensionlib-1.4.7.tgz b/charts/steadybit-extension-kafka/charts/extensionlib-1.4.7.tgz deleted file mode 100644 index 8bbc743..0000000 Binary files a/charts/steadybit-extension-kafka/charts/extensionlib-1.4.7.tgz and /dev/null differ diff --git a/charts/steadybit-extension-kafka/templates/_helpers.tpl b/charts/steadybit-extension-kafka/templates/_helpers.tpl deleted file mode 100644 index 5ccdd68..0000000 --- a/charts/steadybit-extension-kafka/templates/_helpers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{- define "kafka.auth.secret.name" -}} -{{- default "steadybit-extension-kafka" .Values.kafka.auth.existingSecret -}} -{{- end -}} diff --git a/charts/steadybit-extension-kafka/templates/deployment.yaml b/charts/steadybit-extension-kafka/templates/deployment.yaml deleted file mode 100644 index 3a35854..0000000 --- a/charts/steadybit-extension-kafka/templates/deployment.yaml +++ /dev/null @@ -1,157 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "extensionlib.names.fullname" . }} - namespace: {{ .Release.Namespace }} - {{ if .Values.deploymentAnnotations }} - annotations: - {{- with .Values.deploymentAnnotations }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- end }} - labels: - {{- range $key, $value := .Values.extraLabels }} - {{ $key }}: {{ $value }} - {{- end }} - {{- include "extensionlib.labels" (list .) | nindent 4 }} -spec: - replicas: 1 - selector: - matchLabels: - {{- include "extensionlib.selectorLabels" . | nindent 6 }} - template: - metadata: - labels: - {{- include "extensionlib.selectorLabels" . | nindent 8 }} - {{- include "extensionlib.labels" (list .) | nindent 8 }} - {{- range $key, $value := .Values.extraLabels }} - {{ $key }}: {{ $value }} - {{- end }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - oneagent.dynatrace.com/injection: "false" - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.priorityClassName }} - priorityClassName: {{ . }} - {{- end }} - {{- with .Values.podSecurityContext }} - securityContext: - {{- toYaml . 
| nindent 8 }} - {{- end }} - containers: - - image: {{ .Values.image.name }}:{{ default .Chart.AppVersion .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - name: extension - resources: - requests: - memory: {{ .Values.resources.requests.memory }} - cpu: {{ .Values.resources.requests.cpu }} - limits: - memory: {{ .Values.resources.limits.memory }} - cpu: {{ .Values.resources.limits.cpu }} - env: - {{- if .Values.discovery.attributes.excludes.broker }} - - name: STEADYBIT_EXTENSION_DISCOVERY_ATTRIBUTES_EXCLUDES_BROKERS - value: {{ join "," .Values.discovery.attributes.excludes.broker | quote }} - {{- end }} - {{- if .Values.discovery.attributes.excludes.topic }} - - name: STEADYBIT_EXTENSION_DISCOVERY_ATTRIBUTES_EXCLUDES_TOPICS - value: {{ join "," .Values.discovery.attributes.excludes.topic | quote }} - {{- end }} - {{- if .Values.discovery.attributes.excludes.consumer }} - - name: STEADYBIT_EXTENSION_DISCOVERY_ATTRIBUTES_EXCLUDES_CONSUMER_GROUPS - value: {{ join "," .Values.discovery.attributes.excludes.consumer | quote }} - {{- end }} - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: {{ .Values.kafka.seedBrokers }} - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - name: {{ include "kafka.auth.secret.name" . }} - key: saslUser - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - name: {{ include "kafka.auth.secret.name" . }} - key: saslPassword - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - name: {{ include "kafka.auth.secret.name" . }} - key: saslMechanism - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - name: {{ include "kafka.auth.secret.name" . }} - key: certChainFile - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - name: {{ include "kafka.auth.secret.name" . }} - key: certKeyFile - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - name: {{ include "kafka.auth.secret.name" . }} - key: caFile - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - name: {{ include "kafka.auth.secret.name" . }} - key: useTLS - {{- include "extensionlib.deployment.env" (list .) | nindent 12 }} - {{- with .Values.extraEnv }} - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.extraEnvFrom }} - envFrom: - {{- toYaml . | nindent 12 }} - {{- end }} - volumeMounts: - {{- include "extensionlib.deployment.volumeMounts" (list .) | nindent 12 }} - livenessProbe: - initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }} - periodSeconds: {{ .Values.probes.liveness.periodSeconds }} - timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }} - successThreshold: {{ .Values.probes.liveness.successThreshold }} - failureThreshold: {{ .Values.probes.liveness.failureThreshold }} - httpGet: - path: /health/liveness - port: 8084 - readinessProbe: - initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }} - periodSeconds: {{ .Values.probes.readiness.periodSeconds }} - timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }} - successThreshold: {{ .Values.probes.readiness.successThreshold }} - failureThreshold: {{ .Values.probes.readiness.failureThreshold }} - httpGet: - path: /health/readiness - port: 8084 - {{- with .Values.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - volumes: - {{- include "extensionlib.deployment.volumes" (list .) 
| nindent 8 }} - serviceAccountName: {{ .Values.serviceAccount.name }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.topologySpreadConstraints }} - topologySpreadConstraints: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/charts/steadybit-extension-kafka/templates/secret.yaml b/charts/steadybit-extension-kafka/templates/secret.yaml deleted file mode 100644 index 4e5c33f..0000000 --- a/charts/steadybit-extension-kafka/templates/secret.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if not .Values.kafka.auth.existingSecret -}} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "kafka.auth.secret.name" . }} - namespace: {{ .Release.Namespace }} - labels: - {{- range $key, $value := .Values.extraLabels }} - {{ $key }}: {{ $value }} - {{- end }} -type: Opaque -data: - saslMechanism: {{ .Values.kafka.auth.saslMechanism | b64enc | quote }} - saslUser: {{ .Values.kafka.auth.saslUser | b64enc | quote }} - saslPassword: {{ .Values.kafka.auth.saslPassword | b64enc | quote }} - certChainFile: {{ .Values.kafka.auth.certChainFile | b64enc | quote }} - certKeyFile: {{ .Values.kafka.auth.certKeyFile | b64enc | quote }} - caFile: {{ .Values.kafka.auth.caFile | b64enc | quote }} - useTLS: {{ .Values.kafka.auth.useTLS | b64enc | quote }} -{{- end }} diff --git a/charts/steadybit-extension-kafka/templates/service.yaml b/charts/steadybit-extension-kafka/templates/service.yaml deleted file mode 100644 index 309df9f..0000000 --- a/charts/steadybit-extension-kafka/templates/service.yaml +++ /dev/null @@ -1 +0,0 @@ -{{- include "extensionlib.service" (list . 
8083 (list "ACTION" "DISCOVERY")) -}} diff --git a/charts/steadybit-extension-kafka/templates/serviceaccount.yaml b/charts/steadybit-extension-kafka/templates/serviceaccount.yaml deleted file mode 100644 index b3ff940..0000000 --- a/charts/steadybit-extension-kafka/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.name }} - namespace: {{ .Release.Namespace }} - labels: - {{- range $key, $value := .Values.extraLabels }} - {{ $key }}: {{ $value }} - {{- end }} -automountServiceAccountToken: true -{{- end }} diff --git a/charts/steadybit-extension-kafka/tests/__snapshot__/deployment_test.yaml.snap b/charts/steadybit-extension-kafka/tests/__snapshot__/deployment_test.yaml.snap deleted file mode 100644 index d2fd5d5..0000000 --- a/charts/steadybit-extension-kafka/tests/__snapshot__/deployment_test.yaml.snap +++ /dev/null @@ -1,1040 +0,0 @@ -manifest should match snapshot using podAnnotations and Labels: - 1: | - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - template: - metadata: - annotations: - oneagent.dynatrace.com/injection: "false" - some-annotation: some-annotation-value - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - some-label: some-label-value - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - spec: - containers: - - env: - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: null - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - key: saslUser - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - key: saslPassword - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - key: saslMechanism - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - key: certChainFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - key: certKeyFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - key: caFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - key: useTLS - name: steadybit-extension-kafka - - name: STEADYBIT_LOG_LEVEL - value: INFO - - name: STEADYBIT_LOG_FORMAT - value: text - image: ghcr.io/steadybit/extension-kafka:v0.0.0 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/liveness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: extension - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/readiness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - cpu: 200m - memory: 32Mi - requests: - cpu: 50m - memory: 16Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: null - securityContext: - 
runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: steadybit-extension-kafka - volumes: null -manifest should match snapshot with TLS: - 1: | - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - template: - metadata: - annotations: - oneagent.dynatrace.com/injection: "false" - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - spec: - containers: - - env: - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: null - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - key: saslUser - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - key: saslPassword - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - key: saslMechanism - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - key: certChainFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - key: certKeyFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - key: caFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - key: useTLS - name: steadybit-extension-kafka - - name: STEADYBIT_LOG_LEVEL - value: INFO - - name: STEADYBIT_LOG_FORMAT - value: text - - name: STEADYBIT_EXTENSION_TLS_SERVER_CERT - value: /etc/extension/certificates/server-cert/tls.crt - - name: STEADYBIT_EXTENSION_TLS_SERVER_KEY - value: /etc/extension/certificates/server-cert/tls.key - image: ghcr.io/steadybit/extension-kafka:v0.0.0 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/liveness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: extension - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/readiness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - cpu: 200m - memory: 32Mi - requests: - cpu: 50m - memory: 16Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: - - mountPath: /etc/extension/certificates/server-cert - name: certificate-server-cert - readOnly: true - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: steadybit-extension-kafka - volumes: - - name: certificate-server-cert - secret: - optional: false - secretName: server-cert -manifest should match snapshot with extra env vars: - 1: | - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - template: - metadata: - annotations: - 
oneagent.dynatrace.com/injection: "false" - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - spec: - containers: - - env: - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: null - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - key: saslUser - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - key: saslPassword - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - key: saslMechanism - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - key: certChainFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - key: certKeyFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - key: caFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - key: useTLS - name: steadybit-extension-kafka - - name: STEADYBIT_LOG_LEVEL - value: INFO - - name: STEADYBIT_LOG_FORMAT - value: text - - name: FOO - value: bar - envFrom: - - configMapRef: null - name: env-configmap - - name: env-secrets - secretRef: null - image: ghcr.io/steadybit/extension-kafka:v0.0.0 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/liveness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: extension - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/readiness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - cpu: 200m - memory: 32Mi - requests: - cpu: 50m - memory: 16Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: null - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: steadybit-extension-kafka - volumes: null -manifest should match snapshot with extra labels: - 1: | - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - tags.datadoghq.com/service: steadybit-extension-kafka - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - template: - metadata: - annotations: - oneagent.dynatrace.com/injection: "false" - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - tags.datadoghq.com/service: steadybit-extension-kafka - spec: - containers: - - env: - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: null - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - key: saslUser - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - key: saslPassword - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - key: saslMechanism - name: steadybit-extension-kafka - - name: 
STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - key: certChainFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - key: certKeyFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - key: caFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - key: useTLS - name: steadybit-extension-kafka - - name: STEADYBIT_LOG_LEVEL - value: INFO - - name: STEADYBIT_LOG_FORMAT - value: text - image: ghcr.io/steadybit/extension-kafka:v0.0.0 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/liveness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: extension - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/readiness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - cpu: 200m - memory: 32Mi - requests: - cpu: 50m - memory: 16Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: null - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: steadybit-extension-kafka - volumes: null -manifest should match snapshot with mutual TLS: - 1: | - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - template: - metadata: - annotations: - oneagent.dynatrace.com/injection: "false" - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - spec: - containers: - - env: - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: null - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - key: saslUser - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - key: saslPassword - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - key: saslMechanism - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - key: certChainFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - key: certKeyFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - key: caFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - key: useTLS - name: steadybit-extension-kafka - - name: STEADYBIT_LOG_LEVEL - value: INFO - - name: STEADYBIT_LOG_FORMAT - value: text - - name: STEADYBIT_EXTENSION_TLS_SERVER_CERT - value: /etc/extension/certificates/server-cert/tls.crt - - name: STEADYBIT_EXTENSION_TLS_SERVER_KEY - value: /etc/extension/certificates/server-cert/tls.key - - name: STEADYBIT_EXTENSION_TLS_CLIENT_CAS - value: 
/etc/extension/certificates/client-cert-a/tls.crt,/etc/extension/certificates/client-cert-a/tls.crt - image: ghcr.io/steadybit/extension-kafka:v0.0.0 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/liveness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: extension - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/readiness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - cpu: 200m - memory: 32Mi - requests: - cpu: 50m - memory: 16Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: - - mountPath: /etc/extension/certificates/client-cert-a - name: certificate-client-cert-a - readOnly: true - - mountPath: /etc/extension/certificates/server-cert - name: certificate-server-cert - readOnly: true - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: steadybit-extension-kafka - volumes: - - name: certificate-client-cert-a - secret: - optional: false - secretName: client-cert-a - - name: certificate-server-cert - secret: - optional: false - secretName: server-cert -manifest should match snapshot with mutual TLS using containerPaths: - 1: | - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - template: - metadata: - annotations: - oneagent.dynatrace.com/injection: "false" - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - spec: - containers: - - env: - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: null - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - key: saslUser - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - key: saslPassword - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - key: saslMechanism - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - key: certChainFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - key: certKeyFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - key: caFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - key: useTLS - name: steadybit-extension-kafka - - name: STEADYBIT_LOG_LEVEL - value: INFO - - name: STEADYBIT_LOG_FORMAT - value: text - - name: STEADYBIT_EXTENSION_TLS_SERVER_CERT - value: /etc/tls/server.crt - - name: STEADYBIT_EXTENSION_TLS_SERVER_KEY - value: /etc/tls/server.key - - name: STEADYBIT_EXTENSION_TLS_CLIENT_CAS - value: /etc/tls/ca.crt,/etc/tls/ca2.crt - image: ghcr.io/steadybit/extension-kafka:v0.0.0 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/liveness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - 
successThreshold: 1 - timeoutSeconds: 5 - name: extension - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/readiness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - cpu: 200m - memory: 32Mi - requests: - cpu: 50m - memory: 16Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: null - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: steadybit-extension-kafka - volumes: null -manifest should match snapshot with podSecurityContext: - 1: | - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - template: - metadata: - annotations: - oneagent.dynatrace.com/injection: "false" - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - spec: - containers: - - env: - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: null - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - key: saslUser - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - key: saslPassword - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - key: saslMechanism - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - key: certChainFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - key: certKeyFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - key: caFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - key: useTLS - name: steadybit-extension-kafka - - name: STEADYBIT_LOG_LEVEL - value: INFO - - name: STEADYBIT_LOG_FORMAT - value: text - image: ghcr.io/steadybit/extension-kafka:v0.0.0 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/liveness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: extension - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/readiness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - cpu: 200m - memory: 32Mi - requests: - cpu: 50m - memory: 16Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: null - securityContext: - runAsNonRoot: true - runAsUser: 2222 - seccompProfile: - type: RuntimeDefault - serviceAccountName: steadybit-extension-kafka - volumes: null -manifest should match snapshot with priority class: - 1: | - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - replicas: 1 - selector: - matchLabels: - 
app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - template: - metadata: - annotations: - oneagent.dynatrace.com/injection: "false" - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - spec: - containers: - - env: - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: null - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - key: saslUser - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - key: saslPassword - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - key: saslMechanism - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - key: certChainFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - key: certKeyFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - key: caFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - key: useTLS - name: steadybit-extension-kafka - - name: STEADYBIT_LOG_LEVEL - value: INFO - - name: STEADYBIT_LOG_FORMAT - value: text - image: ghcr.io/steadybit/extension-kafka:v0.0.0 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/liveness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: extension - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/readiness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - cpu: 200m - memory: 32Mi - requests: - cpu: 50m - memory: 16Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: null - priorityClassName: my-priority-class - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: steadybit-extension-kafka - volumes: null -manifest should match snapshot without TLS: - 1: | - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - template: - metadata: - annotations: - oneagent.dynatrace.com/injection: "false" - labels: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka - steadybit.com/discovery-disabled: "true" - steadybit.com/extension: "true" - spec: - containers: - - env: - - name: STEADYBIT_EXTENSION_SEED_BROKERS - value: null - - name: STEADYBIT_EXTENSION_SASL_USER - valueFrom: - secretKeyRef: - key: saslUser - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_PASSWORD - valueFrom: - secretKeyRef: - key: saslPassword - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_SASL_MECHANISM - valueFrom: - secretKeyRef: - key: saslMechanism - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_CHAIN_FILE - valueFrom: - secretKeyRef: - key: 
certChainFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CERT_KEY_FILE - valueFrom: - secretKeyRef: - key: certKeyFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CLUSTER_CA_FILE - valueFrom: - secretKeyRef: - key: caFile - name: steadybit-extension-kafka - - name: STEADYBIT_EXTENSION_KAFKA_CONNECTION_USE_TLS - valueFrom: - secretKeyRef: - key: useTLS - name: steadybit-extension-kafka - - name: STEADYBIT_LOG_LEVEL - value: INFO - - name: STEADYBIT_LOG_FORMAT - value: text - image: ghcr.io/steadybit/extension-kafka:v0.0.0 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/liveness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: extension - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/readiness - port: 8084 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - cpu: 200m - memory: 32Mi - requests: - cpu: 50m - memory: 16Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: null - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: steadybit-extension-kafka - volumes: null diff --git a/charts/steadybit-extension-kafka/tests/__snapshot__/service_test.yaml.snap b/charts/steadybit-extension-kafka/tests/__snapshot__/service_test.yaml.snap deleted file mode 100644 index f76c987..0000000 --- a/charts/steadybit-extension-kafka/tests/__snapshot__/service_test.yaml.snap +++ /dev/null @@ -1,92 +0,0 @@ -manifest should match snapshot with TLS: - 1: | - apiVersion: v1 - kind: Service - metadata: - annotations: - steadybit.com/extension-auto-discovery: | - {"extensions":[{"port":8083,"protocol":"https","types":["ACTION","DISCOVERY"]}]} - steadybit.com/extension-auto-registration: | - {"extensions":[{"port":8083,"protocol":"https"}]} - labels: null - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - ports: - - appProtocol: tcp - name: tcp-app - port: 8083 - protocol: TCP - targetPort: 8083 - selector: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka -manifest should match snapshot with mutual TLS: - 1: | - apiVersion: v1 - kind: Service - metadata: - annotations: - steadybit.com/extension-auto-discovery: | - {"extensions":[{"port":8083,"protocol":"https","types":["ACTION","DISCOVERY"]}]} - steadybit.com/extension-auto-registration: | - {"extensions":[{"port":8083,"protocol":"https"}]} - labels: null - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - ports: - - appProtocol: tcp - name: tcp-app - port: 8083 - protocol: TCP - targetPort: 8083 - selector: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka -manifest should match snapshot with mutual TLS using containerPaths: - 1: | - apiVersion: v1 - kind: Service - metadata: - annotations: - steadybit.com/extension-auto-discovery: | - {"extensions":[{"port":8083,"protocol":"https","types":["ACTION","DISCOVERY"]}]} - steadybit.com/extension-auto-registration: | - {"extensions":[{"port":8083,"protocol":"https"}]} - labels: null - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - ports: - - appProtocol: tcp - name: tcp-app - port: 8083 - protocol: TCP - targetPort: 8083 - selector: - app.kubernetes.io/instance: 
RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka -manifest should match snapshot without TLS: - 1: | - apiVersion: v1 - kind: Service - metadata: - annotations: - steadybit.com/extension-auto-discovery: | - {"extensions":[{"port":8083,"protocol":"http","types":["ACTION","DISCOVERY"]}]} - steadybit.com/extension-auto-registration: | - {"extensions":[{"port":8083,"protocol":"http"}]} - labels: null - name: RELEASE-NAME-steadybit-extension-kafka - namespace: NAMESPACE - spec: - ports: - - appProtocol: tcp - name: tcp-app - port: 8083 - protocol: TCP - targetPort: 8083 - selector: - app.kubernetes.io/instance: RELEASE-NAME - app.kubernetes.io/name: steadybit-extension-kafka diff --git a/charts/steadybit-extension-kafka/tests/__snapshot__/serviceaccount_test.yaml.snap b/charts/steadybit-extension-kafka/tests/__snapshot__/serviceaccount_test.yaml.snap deleted file mode 100644 index 7abe93f..0000000 --- a/charts/steadybit-extension-kafka/tests/__snapshot__/serviceaccount_test.yaml.snap +++ /dev/null @@ -1,9 +0,0 @@ -manifest should match snapshot: - 1: | - apiVersion: v1 - automountServiceAccountToken: true - kind: ServiceAccount - metadata: - labels: null - name: steadybit-extension-kafka - namespace: NAMESPACE diff --git a/charts/steadybit-extension-kafka/tests/deployment_test.yaml b/charts/steadybit-extension-kafka/tests/deployment_test.yaml deleted file mode 100644 index a5a2ade..0000000 --- a/charts/steadybit-extension-kafka/tests/deployment_test.yaml +++ /dev/null @@ -1,82 +0,0 @@ -templates: - - deployment.yaml -chart: - appVersion: v0.0.0 -tests: - - it: manifest should match snapshot without TLS - asserts: - - matchSnapshot: { } - - it: manifest should match snapshot using podAnnotations and Labels - set: - podAnnotations: - some-annotation: "some-annotation-value" - podLabels: - some-label: "some-label-value" - asserts: - - matchSnapshot: { } - - it: manifest should match snapshot with TLS - set: - tls: - server: - certificate: - fromSecret: server-cert - asserts: - - matchSnapshot: { } - - it: manifest should match snapshot with mutual TLS - set: - tls: - server: - certificate: - fromSecret: server-cert - client: - certificates: - fromSecrets: - - client-cert-a - - client-cert-a - asserts: - - matchSnapshot: { } - - it: manifest should match snapshot with mutual TLS using containerPaths - set: - tls: - server: - certificate: - path: /etc/tls/server.crt - key: - path: /etc/tls/server.key - client: - certificates: - paths: - - /etc/tls/ca.crt - - /etc/tls/ca2.crt - asserts: - - matchSnapshot: {} - - it: manifest should match snapshot with extra env vars - set: - extraEnv: - - name: FOO - value: "bar" - extraEnvFrom: - - configMapRef: - name: env-configmap - - secretRef: - name: env-secrets - asserts: - - matchSnapshot: {} - - it: manifest should match snapshot with extra labels - set: - extraLabels: - tags.datadoghq.com/service: steadybit-extension-kafka - asserts: - - matchSnapshot: {} - - it: manifest should match snapshot with podSecurityContext - set: - podSecurityContext: - runAsUser: 2222 - asserts: - - matchSnapshot: {} - - - it: manifest should match snapshot with priority class - set: - priorityClassName: my-priority-class - asserts: - - matchSnapshot: {} diff --git a/charts/steadybit-extension-kafka/tests/service_test.yaml b/charts/steadybit-extension-kafka/tests/service_test.yaml deleted file mode 100644 index 27aa7bb..0000000 --- a/charts/steadybit-extension-kafka/tests/service_test.yaml +++ /dev/null @@ -1,42 +0,0 @@ -templates: - - service.yaml -tests: - - 
it: manifest should match snapshot without TLS - asserts: - - matchSnapshot: {} - - it: manifest should match snapshot with TLS - set: - tls: - server: - certificate: - fromSecret: server-cert - asserts: - - matchSnapshot: {} - - it: manifest should match snapshot with mutual TLS - set: - tls: - server: - certificate: - fromSecret: server-cert - client: - certificates: - fromSecrets: - - client-cert-a - - client-cert-a - asserts: - - matchSnapshot: {} - - it: manifest should match snapshot with mutual TLS using containerPaths - set: - tls: - server: - certificate: - path: /etc/tls/server.crt - key: - path: /etc/tls/server.key - client: - certificates: - paths: - - /etc/tls/ca.crt - - /etc/tls/ca2.crt - asserts: - - matchSnapshot: {} diff --git a/charts/steadybit-extension-kafka/tests/serviceaccount_test.yaml b/charts/steadybit-extension-kafka/tests/serviceaccount_test.yaml deleted file mode 100644 index 0f1fe40..0000000 --- a/charts/steadybit-extension-kafka/tests/serviceaccount_test.yaml +++ /dev/null @@ -1,6 +0,0 @@ -templates: - - serviceaccount.yaml -tests: - - it: manifest should match snapshot - asserts: - - matchSnapshot: { } diff --git a/charts/steadybit-extension-kafka/values.yaml b/charts/steadybit-extension-kafka/values.yaml deleted file mode 100644 index 0e6ec23..0000000 --- a/charts/steadybit-extension-kafka/values.yaml +++ /dev/null @@ -1,152 +0,0 @@ -# Default values for steadybit-extension-kafka. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -image: - # image.name -- The container image to use for the Steadybit scaffold extension. - name: ghcr.io/steadybit/extension-kafka - # image.tag -- tag name of the extension-kafka container image to use. Defaults to appVersion of this chart. - # See https://github.com/steadybit/extension-kafka/pkgs/container/extension-kafka/versions for all tags. - tag: null - # image.pullPolicy -- Specifies when to pull the image container. - pullPolicy: IfNotPresent - -kafka: - seedBrokers: "" - auth: - saslMechanism: "" - saslUser: "" - saslPassword: "" - # kafka.auth.existingSecret -- If defined, will skip secret creation and instead assume that the referenced secret contains all the necessary auth properties. - existingSecret: null - certChainFile: "" - certKeyFile: "" - caFile: "" - useTLS: "" - -tls: - server: - certificate: - # tls.server.certificate.fromSecret -- The name of the secret containing the TLS certificate for the extension. - # The extension will then create an HTTPS server instead of an HTTP server. - fromSecret: null - # tls.server.certificate.path --Path to the TLS certificate for the extension. - path: null - key: - # tls.server.certificate.key-path --Path to the key for the TLS certificate for the extension. - path: null - client: - certificates: - # tls.client.certificates.fromSecrets -- List of secret names containing TLS certificates for the extension to trust. - # The extension will require clients to authenticate using one of these certificates. In essence, this will enable mutual TLS. - fromSecrets: [] - # tls.client.certificates.paths -- List paths containing TLS certificates for the extension to trust. - # The extension will require clients to authenticate using one of these certificates. In essence, this will enable mutual TLS. - paths: [] - -logging: - # logging.level -- The active log level. Valid values are: TRACE, DEBUG, INFO, WARN, ERROR - level: INFO - # logging.format -- The format of the log entries. 
One of text, json - format: text - -probes: - # probes.readiness.* -- Configuration of the Kubernetes readiness probe - readiness: - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - # probes.liveness.* -- Configuration of the Kubernetes liveness probe - liveness: - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 5 - successThreshold: 1 - -resources: - requests: - # resources.requests.memory -- The minimal amount of memory needed - memory: "16Mi" - # resources.requests.cpu -- The minimal amount of cpu shares needed - cpu: "50m" - limits: - # resources.limits.memory -- The limit of memory to be used - memory: "32Mi" - # resources.limits.cpu -- The limit of cpu share to be used during its interval - cpu: "200m" - -serviceAccount: - # serviceAccount.create -- Specifies whether a ServiceAccount should be created. - create: true - # serviceAccount.name -- The name of the ServiceAccount to use. - name: steadybit-extension-kafka - -# extra labels to apply to the Kubernetes resources -extraLabels: {} - -# deploymentAnnotations: Additional annotations to be added to the deployment. -deploymentAnnotations: {} - -# podAnnotations -- Additional annotations to be added to the pods. -podAnnotations: {} - -# podLabels -- Additional labels to be added to the pods. -podLabels: {} - -# nodeSelector -- Node labels for pod assignment -nodeSelector: {} - -# tolerations -- Tolerations to influence pod assignment -tolerations: [] - -# topologySpreadConstraints -- Spread constraints to influence pod assignment. -# https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ -topologySpreadConstraints: [] - -# affinity -- Affinities to influence pod assignment. -affinity: {} - -# priorityClassName -- Priority class used to influence pod scheduling priority. -priorityClassName: null - -# podSecurityContext -- SecurityContext to apply to the pod. -podSecurityContext: - seccompProfile: - type: RuntimeDefault - runAsNonRoot: true - -# containerSecurityContext -- SecurityContext to apply to the container. -containerSecurityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - -# extraEnv -- Array with extra environment variables to add to the container -# e.g: -# extraEnv: -# - name: FOO -# value: "bar" -extraEnv: [] - -# extraEnvFrom -- Array with extra environment variables sources to add to the container -# e.g: -# extraEnvFrom: -# - configMapRef: -# name: env-configmap -# - secretRef: -# name: env-secrets -extraEnvFrom: [] - -discovery: - attributes: - excludes: - # discovery.attributes.excludes.broker -- List of attributes to exclude from Kafka Broker discovery. - broker: [] - # discovery.attributes.excludes.topic -- List of attributes to exclude from Kafka Topic discovery. - topic: [] - # discovery.attributes.excludes.consumer-group -- List of attributes to exclude from Kafka Consumer Group discovery. - consumer: [] diff --git a/config/config.go b/config/config.go deleted file mode 100644 index 5a7ab29..0000000 --- a/config/config.go +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package config - -import ( - "github.com/kelseyhightower/envconfig" - "github.com/rs/zerolog/log" -) - -// Specification is the configuration specification for the extension. Configuration values can be applied -// through environment variables. 
Learn more through the documentation of the envconfig package. -// https://github.com/kelseyhightower/envconfig -type Specification struct { - SeedBrokers string `json:"seedBrokers" required:"true" split_words:"true"` - SaslMechanism string `json:"saslMechanism" required:"false" split_words:"true"` - SaslUser string `json:"saslUser" required:"false" split_words:"true"` - SaslPassword string `json:"saslPassword" required:"false" split_words:"true"` - KafkaConnectionUseTLS string `json:"kafkaConnectionUseTLS" required:"false" split_words:"true"` - KafkaClusterCertChainFile string `json:"kafkaClusterCertChainFile" required:"false" split_words:"true"` - KafkaClusterCertKeyFile string `json:"kafkaClusterCertKeyFile" required:"false" split_words:"true"` - KafkaClusterCaFile string `json:"kafkaClusterCaFile" required:"false" split_words:"true"` - DiscoveryIntervalConsumerGroup int `json:"discoveryIntervalKafkaConsumerGroup" split_words:"true" required:"false" default:"30"` - DiscoveryIntervalKafkaBroker int `json:"discoveryIntervalKafkaBroker" split_words:"true" required:"false" default:"30"` - DiscoveryIntervalKafkaTopic int `json:"discoveryIntervalKafkaTopic" split_words:"true" required:"false" default:"30"` - DiscoveryAttributesExcludesBrokers []string `json:"discoveryAttributesExcludesBrokers" split_words:"true" required:"false"` - DiscoveryAttributesExcludesTopics []string `json:"discoveryAttributesExcludesTopics" split_words:"true" required:"false"` - DiscoveryAttributesExcludesConsumerGroups []string `json:"discoveryAttributesExcludesConsumerGroups" split_words:"true" required:"false"` -} - -var ( - Config Specification -) - -func ParseConfiguration() { - err := envconfig.Process("steadybit_extension", &Config) - if err != nil { - log.Fatal().Err(err).Msgf("Failed to parse configuration from environment.") - } -} - -func ValidateConfiguration() { - // You may optionally validate the configuration here. 
-} diff --git a/e2e/integration_test.go b/e2e/integration_test.go deleted file mode 100644 index 7faeaf5..0000000 --- a/e2e/integration_test.go +++ /dev/null @@ -1,372 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package e2e - -import ( - "context" - "fmt" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_test/client" - "github.com/steadybit/action-kit/go/action_kit_test/e2e" - actValidate "github.com/steadybit/action-kit/go/action_kit_test/validate" - "github.com/steadybit/discovery-kit/go/discovery_kit_api" - "github.com/steadybit/discovery-kit/go/discovery_kit_test/validate" - "github.com/steadybit/extension-kit/extlogging" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "io" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "testing" - "time" -) - -var kafkactl func(ctx context.Context, commands ...string) (string, error) -var kafkactlStop func() - -func TestWithMinikube(t *testing.T) { - extlogging.InitZeroLog() - - extFactory := e2e.HelmExtensionFactory{ - Name: "extension-kafka", - Port: 8083, - - ExtraArgs: func(m *e2e.Minikube) []string { - return []string{ - "--set", "logging.lever=debug", - "--set", "kafka.seedBrokers='my-kafka.default.svc.cluster.local:9092'", - "--set", "kafka.auth.saslMechanism=PLAIN", - "--set", "kafka.auth.saslUser=user1", - "--set", "kafka.auth.saslPassword=steadybit", - } - }, - } - - defer func() { - if kafkactlStop != nil { - kafkactlStop() - } - }() - - e2e.WithMinikube(t, e2e.DefaultMinikubeOpts().AfterStart(helmInstallLocalStack).AfterStart(setupKafkactl), &extFactory, []e2e.WithMinikubeTestCase{ - { - Name: "validate discovery", - Test: validateDiscovery, - }, - { - Name: "test discovery", - Test: testDiscovery, - }, - { - Name: "validate Actions", - Test: validateActions, - }, - { - Name: "alter num io threads", - Test: testAlterNumIoThreads, - }, - { - Name: "alter num network threads", - Test: testAlterNumNetworkThreads, - }, - { - Name: "alter limit connection creation rate", - Test: testAlterLimitConnectionCreationRate, - }, - { - Name: "alter max message bytes", - Test: testAlterMaxMessageBytes, - }, - }) -} - -func validateDiscovery(t *testing.T, _ *e2e.Minikube, e *e2e.Extension) { - assert.NoError(t, validate.ValidateEndpointReferences("/", e.Client)) -} - -func testDiscovery(t *testing.T, _ *e2e.Minikube, e *e2e.Extension) { - ctx, cancel := context.WithTimeout(t.Context(), 120*time.Second) - defer cancel() - - target, err := e2e.PollForTarget(ctx, e, "com.steadybit.extension_kafka.broker", func(target discovery_kit_api.Target) bool { - return e2e.HasAttribute(target, "kafka.broker.node-id", "1") - }) - require.NoError(t, err) - assert.Equal(t, target.TargetType, "com.steadybit.extension_kafka.broker") - assert.Equal(t, target.Attributes["kafka.broker.node-id"], []string{"1"}) - assert.Equal(t, target.Attributes["kafka.broker.port"], []string{"9092"}) - assert.Equal(t, target.Attributes["kafka.broker.host"], []string{"my-kafka-controller-1.my-kafka-controller-headless.default.svc.cluster.local"}) -} - -func validateActions(t *testing.T, _ *e2e.Minikube, e *e2e.Extension) { - assert.NoError(t, actValidate.ValidateEndpointReferences("/", e.Client)) -} - -func testAlterNumIoThreads(t *testing.T, _ *e2e.Minikube, e *e2e.Extension) { - target := &action_kit_api.Target{ - Name: "test_broker", - Attributes: map[string][]string{ - "kafka.broker.node-id": {"0"}, - }, - } - 
- config := struct { - Duration int `json:"duration"` - IoThreads float32 `json:"io_threads"` - }{ - Duration: 20000, - IoThreads: 1.0, - } - - // Reduce - increaseThreadsAction, err := e.RunAction("com.steadybit.extension_kafka.broker.limit-io-threads", target, config, &action_kit_api.ExecutionContext{}) - require.NoError(t, err) - defer func() { _ = increaseThreadsAction.Cancel() }() - - require.EventuallyWithT(t, func(c *assert.CollectT) { - brokerConfig, err := kafkactl(t.Context(), "describe", "broker", "0") - assert.NoError(c, err, "Failed to describe broker config") - assert.Regexp(c, `num\.io\.threads\s+1`, brokerConfig, "property not found") - }, 20*time.Second, 1*time.Second, "num.io.threads should be set to 1") - - require.NoError(t, increaseThreadsAction.Wait()) - require.NotEmpty(t, t, increaseThreadsAction.Messages()) - require.NotEmpty(t, t, increaseThreadsAction.Metrics()) - - // Increase - config.IoThreads = 100.0 - decreaseThreadsAction, err := e.RunAction("com.steadybit.extension_kafka.broker.limit-io-threads", target, config, &action_kit_api.ExecutionContext{}) - require.NoError(t, err) - defer func() { _ = decreaseThreadsAction.Cancel() }() - - require.EventuallyWithT(t, func(c *assert.CollectT) { - brokerConfig, err := kafkactl(t.Context(), "describe", "broker", "0") - assert.NoError(c, err, "Failed to describe broker config") - assert.Regexp(c, `num\.io\.threads\s+1`, brokerConfig, "property not found") - }, 20*time.Second, 1*time.Second, "num.io.threads should be set to 100") - - require.NoError(t, increaseThreadsAction.Wait()) - require.NotEmpty(t, t, decreaseThreadsAction.Messages()) - require.NotEmpty(t, t, decreaseThreadsAction.Metrics()) -} - -func testAlterNumNetworkThreads(t *testing.T, _ *e2e.Minikube, e *e2e.Extension) { - target := &action_kit_api.Target{ - Name: "test_broker", - Attributes: map[string][]string{ - "kafka.broker.node-id": {"0"}, - }, - } - - config := struct { - Duration int `json:"duration"` - NetworkThreads int `json:"network_threads"` - }{ - Duration: 20000, - NetworkThreads: 1, - } - - // Reduce - increaseThreadsAction, err := e.RunAction("com.steadybit.extension_kafka.broker.limit-network-threads", target, config, &action_kit_api.ExecutionContext{}) - require.NoError(t, err) - defer func() { _ = increaseThreadsAction.Cancel() }() - - require.EventuallyWithT(t, func(c *assert.CollectT) { - brokerConfig, err := kafkactl(t.Context(), "describe", "broker", "0") - assert.NoError(c, err, "Failed to describe broker config") - assert.Regexp(c, `num\.network\.threads\s+1`, brokerConfig, "property not found") - }, 20*time.Second, 1*time.Second, "num.network.threads should be set to 1") - - require.NoError(t, increaseThreadsAction.Wait()) - require.NotEmpty(t, t, increaseThreadsAction.Messages()) - require.NotEmpty(t, t, increaseThreadsAction.Metrics()) - - // Increase - config.NetworkThreads = 100.0 - decreaseThreadsAction, err := e.RunAction("com.steadybit.extension_kafka.broker.limit-network-threads", target, config, &action_kit_api.ExecutionContext{}) - require.NoError(t, err) - defer func() { _ = decreaseThreadsAction.Cancel() }() - - require.EventuallyWithT(t, func(c *assert.CollectT) { - brokerConfig, err := kafkactl(t.Context(), "describe", "broker", "0") - assert.NoError(c, err, "Failed to describe broker config") - assert.Regexp(c, `num\.network\.threads\s+1`, brokerConfig, "property not found") - }, 20*time.Second, 1*time.Second, "num.network.threads should be set to 1") - - require.NoError(t, increaseThreadsAction.Wait()) - 
require.NotEmpty(t, t, decreaseThreadsAction.Messages()) - require.NotEmpty(t, t, decreaseThreadsAction.Metrics()) -} - -func testAlterLimitConnectionCreationRate(t *testing.T, _ *e2e.Minikube, e *e2e.Extension) { - target := &action_kit_api.Target{ - Name: "test_broker", - Attributes: map[string][]string{ - "kafka.broker.node-id": {"0"}, - }, - } - - config := struct { - Duration int `json:"duration"` - ConnectionRate int `json:"connection_rate"` - }{ - Duration: 20000, - ConnectionRate: 1, - } - - action, err := e.RunAction("com.steadybit.extension_kafka.broker.limit-connection-creation", target, config, &action_kit_api.ExecutionContext{}) - require.NoError(t, err) - defer func() { _ = action.Cancel() }() - - // Testing this setting by opening up too many connections does not work with the current setup, - // as executing "kubectl" commands is too slow, and it does not support parallel invocations. - require.EventuallyWithT(t, func(c *assert.CollectT) { - brokerConfig, err := kafkactl(t.Context(), "describe", "broker", "0") - assert.NoError(c, err, "Failed to describe broker config") - assert.Regexp(c, `max\.connection\.creation\.rate\s+1`, brokerConfig, "property not found") - }, 20*time.Second, 1*time.Second, "max.connection.creation.rate should be set to 1") - - require.NoError(t, action.Wait()) - require.NotEmpty(t, t, action.Messages()) - require.NotEmpty(t, t, action.Metrics()) -} - -func testAlterMaxMessageBytes(t *testing.T, _ *e2e.Minikube, e *e2e.Extension) { - message := fmt.Sprintf("{\"a\": \"%s\"}", strings.Repeat("x", 1000)) - out, err := kafkactl(t.Context(), "produce", "foo", "-v", message) - require.NoError(t, err, out) - - config := struct { - Duration int `json:"duration"` - MaxBytes int `json:"max_bytes"` - }{ - Duration: 20000, - MaxBytes: 100, - } - - // Change message size setting on all nodes - var action client.ActionExecution - for i := 0; i < 3; i++ { - target := &action_kit_api.Target{ - Name: "test_broker", - Attributes: map[string][]string{ - "kafka.broker.node-id": {strconv.Itoa(i)}, - }, - } - action, err = e.RunAction("com.steadybit.extension_kafka.broker.reduce-message-max-bytes", target, config, &action_kit_api.ExecutionContext{}) - require.NoError(t, err) - //goland:noinspection ALL - defer func(a client.ActionExecution) { _ = a.Cancel() }(action) - } - - require.EventuallyWithT(t, func(c *assert.CollectT) { - out, err = kafkactl(t.Context(), "produce", "foo", "-v", message) - require.Error(t, err, out) - }, 20*time.Second, 1*time.Second, "long messages should be rejected") - - //goland:noinspection GoDfaNilDereference - require.NoError(t, action.Wait()) - require.NotEmpty(t, t, action.Messages()) - require.NotEmpty(t, t, action.Metrics()) -} - -func helmInstallLocalStack(minikube *e2e.Minikube) error { - out, err := exec.Command("helm", "repo", "add", "bitnami", "https://charts.bitnami.com/bitnami").CombinedOutput() - if err != nil { - return fmt.Errorf("failed to install helm chart: %s: %s", err, out) - } - out, err = exec.Command("helm", - "upgrade", "--install", - "--kube-context", minikube.Profile, - "--set", "sasl.client.passwords=steadybit", - "--set", "provisioning.enabled=true", - "--set", "provisioning.topics[0].name=foo", - "--set", "image.repository=bitnamilegacy/kafka", - "--set", "image.tag=4.0.0-debian-12-r10", - "--set", "global.security.allowInsecureImages=true", - "--namespace=default", - "--timeout=15m0s", - "my-kafka", "bitnami/kafka ", "--wait").CombinedOutput() - if err != nil { - return fmt.Errorf("failed to install helm chart: 
%s: %s", err, out) - } - return nil -} - -func setupKafkactl(m *e2e.Minikube) error { - configPath := filepath.Join(os.Getenv("HOME"), ".config", "kafkactl", "config.yml") - if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil { - return fmt.Errorf("failed to create config directory: %w", err) - } - - backupPath := configPath + ".backup" - if _, err := os.Stat(configPath); err == nil { - if err := copyFile(configPath, backupPath); err != nil { - return fmt.Errorf("failed to backup config: %w", err) - } - } - - stop := func() { - if err := os.Rename(backupPath, configPath); err != nil { - log.Error().Err(err).Msg("Failed to restore original config") - } - } - - configContent := fmt.Sprintf(`contexts: - e2e: - brokers: - - my-kafka.default.svc.cluster.local:9092 - tls: - enabled: false - sasl: - enabled: true - username: user1 - password: steadybit - kubernetes: - enabled: true - kubecontext: %s - namespace: default -`, m.Profile) - - if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil { - stop() - return fmt.Errorf("failed to write temporary config: %w", err) - } - - kafkactl = func(ctx context.Context, commands ...string) (string, error) { - cmd := exec.CommandContext(ctx, "kafkactl", append(commands, "--context", "e2e")...) - output, err := cmd.CombinedOutput() - if err != nil { - return "", fmt.Errorf("kafkactl command failed: %w, output: %s", err, string(output)) - } - return string(output), nil - } - kafkactlStop = stop - return nil -} - -func copyFile(src, dst string) error { - sourceFile, err := os.Open(src) - if err != nil { - return err - } - defer func(sourceFile *os.File) { - _ = sourceFile.Close() - }(sourceFile) - - destFile, err := os.Create(dst) - if err != nil { - return err - } - defer func(destFile *os.File) { - _ = destFile.Close() - }(destFile) - - _, err = io.Copy(destFile, sourceFile) - return err -} diff --git a/extkafka/alter_actions_test.go b/extkafka/alter_actions_test.go deleted file mode 100644 index d58338d..0000000 --- a/extkafka/alter_actions_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "fmt" - "github.com/steadybit/extension-kit/extutil" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestAlterActions_Describe(t *testing.T) { - t.Run("AlterLimitConnectionCreate", func(t *testing.T) { - //Given - action := AlterLimitConnectionCreateRateAttack{} - //When - response := action.Describe() - - //Then - assert.Equal(t, "Limit the Connection Creation Rate", response.Description) - assert.Equal(t, "Limit Connection Creation Rate", response.Label) - assert.Equal(t, kafkaBrokerTargetId, response.TargetSelection.TargetType) - assert.Equal(t, fmt.Sprintf("%s.limit-connection-creation", kafkaBrokerTargetId), response.Id) - assert.Equal(t, extutil.Ptr("Kafka"), response.Technology) - }) - - t.Run("AlterMessageMaxBytes", func(t *testing.T) { - //Given - action := AlterMessageMaxBytesAttack{} - //When - response := action.Describe() - - //Then - assert.Equal(t, "Reduce the max bytes allowed per message", response.Description) - assert.Equal(t, "Reduce Message Batch Size", response.Label) - assert.Equal(t, kafkaBrokerTargetId, response.TargetSelection.TargetType) - assert.Equal(t, fmt.Sprintf("%s.reduce-message-max-bytes", kafkaBrokerTargetId), response.Id) - assert.Equal(t, extutil.Ptr("Kafka"), response.Technology) - }) - - t.Run("AlterNumberIOThreads", func(t *testing.T) { - //Given - action := 
AlterNumberIOThreadsAttack{} - //When - response := action.Describe() - - //Then - assert.Equal(t, "Limit the number of IO threads", response.Description) - assert.Equal(t, "Limit IO Threads", response.Label) - assert.Equal(t, kafkaBrokerTargetId, response.TargetSelection.TargetType) - assert.Equal(t, fmt.Sprintf("%s.limit-io-threads", kafkaBrokerTargetId), response.Id) - assert.Equal(t, extutil.Ptr("Kafka"), response.Technology) - }) - - t.Run("AlterNumberNetworkThreads", func(t *testing.T) { - //Given - action := AlterNumberNetworkThreadsAttack{} - //When - response := action.Describe() - - //Then - assert.Equal(t, "Limit the number of network threads", response.Description) - assert.Equal(t, "Limit Network Threads", response.Label) - assert.Equal(t, kafkaBrokerTargetId, response.TargetSelection.TargetType) - assert.Equal(t, fmt.Sprintf("%s.limit-network-threads", kafkaBrokerTargetId), response.Id) - assert.Equal(t, extutil.Ptr("Kafka"), response.Technology) - }) -} diff --git a/extkafka/alter_limit_connection_creation_rate.go b/extkafka/alter_limit_connection_creation_rate.go deleted file mode 100644 index 0357c35..0000000 --- a/extkafka/alter_limit_connection_creation_rate.go +++ /dev/null @@ -1,100 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "strings" -) - -type AlterLimitConnectionCreateRateAttack struct{} - -const ( - LimitConnectionRate = "max.connection.creation.rate" -) - -var _ action_kit_sdk.Action[AlterState] = (*AlterLimitConnectionCreateRateAttack)(nil) - -func NewAlterLimitConnectionCreateRateAttack() action_kit_sdk.Action[AlterState] { - return &AlterLimitConnectionCreateRateAttack{} -} - -func (k *AlterLimitConnectionCreateRateAttack) NewEmptyState() AlterState { - return AlterState{} -} - -func (k *AlterLimitConnectionCreateRateAttack) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.limit-connection-creation", kafkaBrokerTargetId), - Label: "Limit Connection Creation Rate", - Description: "Limit the Connection Creation Rate", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaBrokerTargetId, - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "broker node id", - Description: extutil.Ptr("Find broker by node id"), - Query: "kafka.broker.node-id=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - TimeControl: action_kit_api.TimeControlExternal, - Kind: action_kit_api.Attack, - Parameters: []action_kit_api.ActionParameter{ - durationAlter, - { - Label: "Connection creation rate", - Description: extutil.Ptr("Limit the connection creation rate to simulate slow acceptance of new connections."), - Name: "connection_rate", - Type: action_kit_api.ActionParameterTypeInteger, - DefaultValue: extutil.Ptr("10"), - Required: extutil.Ptr(true), - }, - }, - } -} - -func (k *AlterLimitConnectionCreateRateAttack) Prepare(ctx context.Context, state *AlterState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - var err error - state.BrokerID = 
extutil.ToInt32(request.Target.Attributes["kafka.broker.node-id"][0]) - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - state.TargetBrokerConfigValue = extutil.ToInt(request.Config["connection_rate"]) - state.InitialBrokerConfigValue, err = describeConfigInt(ctx, state.BrokerHosts, LimitConnectionRate, state.BrokerID) - return nil, err -} - -func (k *AlterLimitConnectionCreateRateAttack) Start(ctx context.Context, state *AlterState) (*action_kit_api.StartResult, error) { - if err := alterConfigInt(ctx, state.BrokerHosts, LimitConnectionRate, state.TargetBrokerConfigValue, state.BrokerID); err != nil { - return nil, err - } - return &action_kit_api.StartResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Alter config %s with value %d (initial value was: %d) for broker node-id: %v", LimitConnectionRate, state.TargetBrokerConfigValue, state.InitialBrokerConfigValue, state.BrokerID), - }}, - }, nil -} - -func (k *AlterLimitConnectionCreateRateAttack) Stop(ctx context.Context, state *AlterState) (*action_kit_api.StopResult, error) { - err := alterConfigInt(ctx, state.BrokerHosts, LimitConnectionRate, state.InitialBrokerConfigValue, state.BrokerID) - if err != nil { - return nil, err - } - return &action_kit_api.StopResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Alter config %s back to initial value %d for broker node-id: %v", LimitConnectionRate, state.InitialBrokerConfigValue, state.BrokerID), - }}, - }, nil -} diff --git a/extkafka/alter_max_message_bytes.go b/extkafka/alter_max_message_bytes.go deleted file mode 100644 index acc3bc7..0000000 --- a/extkafka/alter_max_message_bytes.go +++ /dev/null @@ -1,99 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "strings" -) - -type AlterMessageMaxBytesAttack struct{} - -const ( - MessageMaxBytes = "message.max.bytes" -) - -var _ action_kit_sdk.Action[AlterState] = (*AlterMessageMaxBytesAttack)(nil) - -func NewAlterMaxMessageBytesAttack() action_kit_sdk.Action[AlterState] { - return &AlterMessageMaxBytesAttack{} -} - -func (k *AlterMessageMaxBytesAttack) NewEmptyState() AlterState { - return AlterState{} -} - -func (k *AlterMessageMaxBytesAttack) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.reduce-message-max-bytes", kafkaBrokerTargetId), - Label: "Reduce Message Batch Size", - Description: "Reduce the max bytes allowed per message", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaBrokerTargetId, - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "broker node id", - Description: extutil.Ptr("Find broker by node id"), - Query: "kafka.broker.node-id=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - TimeControl: action_kit_api.TimeControlExternal, - Kind: action_kit_api.Attack, - Parameters: []action_kit_api.ActionParameter{ - durationAlter, - { - Label: "Max bytes per message", - Description: 
extutil.Ptr("Set a very low max bytes per message to simulate message size rejections."), - Name: "max_bytes", - Type: action_kit_api.ActionParameterTypeInteger, - DefaultValue: extutil.Ptr("100"), - Required: extutil.Ptr(true), - }, - }, - } -} - -func (k *AlterMessageMaxBytesAttack) Prepare(ctx context.Context, state *AlterState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - var err error - state.BrokerID = extutil.ToInt32(request.Target.Attributes["kafka.broker.node-id"][0]) - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - state.TargetBrokerConfigValue = extutil.ToInt(request.Config["max_bytes"]) - state.InitialBrokerConfigValue, err = describeConfigInt(ctx, state.BrokerHosts, MessageMaxBytes, state.BrokerID) - return nil, err -} - -func (k *AlterMessageMaxBytesAttack) Start(ctx context.Context, state *AlterState) (*action_kit_api.StartResult, error) { - if err := alterConfigInt(ctx, state.BrokerHosts, MessageMaxBytes, state.TargetBrokerConfigValue, state.BrokerID); err != nil { - return nil, err - } - return &action_kit_api.StartResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Alter config %s with value %d (initial value was: %d) for broker node-id: %v", MessageMaxBytes, state.TargetBrokerConfigValue, state.InitialBrokerConfigValue, state.BrokerID), - }}, - }, nil -} - -func (k *AlterMessageMaxBytesAttack) Stop(ctx context.Context, state *AlterState) (*action_kit_api.StopResult, error) { - if err := alterConfigInt(ctx, state.BrokerHosts, MessageMaxBytes, state.InitialBrokerConfigValue, state.BrokerID); err != nil { - return nil, err - } - return &action_kit_api.StopResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Alter config %s back to initial value %d for broker node-id: %v", MessageMaxBytes, state.InitialBrokerConfigValue, state.BrokerID), - }}, - }, nil -} diff --git a/extkafka/alter_num_io_threads.go b/extkafka/alter_num_io_threads.go deleted file mode 100644 index 4764238..0000000 --- a/extkafka/alter_num_io_threads.go +++ /dev/null @@ -1,99 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "strings" -) - -type AlterNumberIOThreadsAttack struct{} - -const ( - NumberIOThreads = "num.io.threads" -) - -var _ action_kit_sdk.Action[AlterState] = (*AlterNumberIOThreadsAttack)(nil) - -func NewAlterNumberIOThreadsAttack() action_kit_sdk.Action[AlterState] { - return &AlterNumberIOThreadsAttack{} -} - -func (k *AlterNumberIOThreadsAttack) NewEmptyState() AlterState { - return AlterState{} -} - -func (k *AlterNumberIOThreadsAttack) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.limit-io-threads", kafkaBrokerTargetId), - Label: "Limit IO Threads", - Description: "Limit the number of IO threads", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaBrokerTargetId, - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "broker node id", - 
Description: extutil.Ptr("Find broker by node id"), - Query: "kafka.broker.node-id=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - TimeControl: action_kit_api.TimeControlExternal, - Kind: action_kit_api.Attack, - Parameters: []action_kit_api.ActionParameter{ - durationAlter, - { - Label: "Number of IO Threads", - Description: extutil.Ptr("Reduce the number of I/O threads to limit the broker’s capacity to perform disk operations, potentially causing increased latency or request timeouts."), - Name: "io_threads", - Type: action_kit_api.ActionParameterTypeInteger, - DefaultValue: extutil.Ptr("4"), - Required: extutil.Ptr(true), - }, - }, - } -} - -func (k *AlterNumberIOThreadsAttack) Prepare(ctx context.Context, state *AlterState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - var err error - state.BrokerID = extutil.ToInt32(request.Target.Attributes["kafka.broker.node-id"][0]) - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - state.TargetBrokerConfigValue = extutil.ToInt(request.Config["io_threads"]) - state.InitialBrokerConfigValue, err = describeConfigInt(ctx, state.BrokerHosts, NumberIOThreads, state.BrokerID) - return nil, err -} - -func (k *AlterNumberIOThreadsAttack) Start(ctx context.Context, state *AlterState) (*action_kit_api.StartResult, error) { - if err := adjustThreads(ctx, state.BrokerHosts, NumberIOThreads, state.TargetBrokerConfigValue, state.BrokerID); err != nil { - return nil, err - } - return &action_kit_api.StartResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Alter config %s with value %d (initial value was: %d) for broker node-id: %v", NumberIOThreads, state.TargetBrokerConfigValue, state.InitialBrokerConfigValue, state.BrokerID), - }}, - }, nil -} - -func (k *AlterNumberIOThreadsAttack) Stop(ctx context.Context, state *AlterState) (*action_kit_api.StopResult, error) { - if err := adjustThreads(ctx, state.BrokerHosts, NumberIOThreads, state.InitialBrokerConfigValue, state.BrokerID); err != nil { - return nil, err - } - return &action_kit_api.StopResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Alter config %s back to initial value %d for broker node-id: %v", NumberIOThreads, state.InitialBrokerConfigValue, state.BrokerID), - }}, - }, nil -} diff --git a/extkafka/alter_num_network_threads.go b/extkafka/alter_num_network_threads.go deleted file mode 100644 index 5636395..0000000 --- a/extkafka/alter_num_network_threads.go +++ /dev/null @@ -1,99 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "strings" -) - -type AlterNumberNetworkThreadsAttack struct{} - -const ( - NumberNetworkThreads = "num.network.threads" -) - -var _ action_kit_sdk.Action[AlterState] = (*AlterNumberNetworkThreadsAttack)(nil) - -func NewAlterNumberNetworkThreadsAttack() action_kit_sdk.Action[AlterState] { - return &AlterNumberNetworkThreadsAttack{} -} - -func (k *AlterNumberNetworkThreadsAttack) NewEmptyState() AlterState { - return AlterState{} -} - -func (k *AlterNumberNetworkThreadsAttack) Describe() 
action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.limit-network-threads", kafkaBrokerTargetId), - Label: "Limit Network Threads", - Description: "Limit the number of network threads", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaBrokerTargetId, - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "broker node id", - Description: extutil.Ptr("Find broker by node id"), - Query: "kafka.broker.node-id=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - TimeControl: action_kit_api.TimeControlExternal, - Kind: action_kit_api.Attack, - Parameters: []action_kit_api.ActionParameter{ - durationAlter, - { - Label: "Number of Network Threads", - Description: extutil.Ptr("Reduce the num.network.threads to limit the broker’s ability to process network requests."), - Name: "network_threads", - Type: action_kit_api.ActionParameterTypeInteger, - DefaultValue: extutil.Ptr("4"), - Required: extutil.Ptr(true), - }, - }, - } -} - -func (k *AlterNumberNetworkThreadsAttack) Prepare(ctx context.Context, state *AlterState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - var err error - state.BrokerID = extutil.ToInt32(request.Target.Attributes["kafka.broker.node-id"][0]) - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - state.TargetBrokerConfigValue = extutil.ToInt(request.Config["network_threads"]) - state.InitialBrokerConfigValue, err = describeConfigInt(ctx, state.BrokerHosts, NumberNetworkThreads, state.BrokerID) - return nil, err -} - -func (k *AlterNumberNetworkThreadsAttack) Start(ctx context.Context, state *AlterState) (*action_kit_api.StartResult, error) { - if err := adjustThreads(ctx, state.BrokerHosts, NumberNetworkThreads, state.TargetBrokerConfigValue, state.BrokerID); err != nil { - return nil, err - } - return &action_kit_api.StartResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Alter config %s with value %d (initial value was: %d) for broker node-id: %v", NumberNetworkThreads, state.TargetBrokerConfigValue, state.InitialBrokerConfigValue, state.BrokerID), - }}, - }, nil -} - -func (k *AlterNumberNetworkThreadsAttack) Stop(ctx context.Context, state *AlterState) (*action_kit_api.StopResult, error) { - if err := adjustThreads(ctx, state.BrokerHosts, NumberNetworkThreads, state.InitialBrokerConfigValue, state.BrokerID); err != nil { - return nil, err - } - return &action_kit_api.StopResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Alter config %s back to initial value %d for broker node-id: %v", NumberNetworkThreads, state.InitialBrokerConfigValue, state.BrokerID), - }}, - }, nil -} diff --git a/extkafka/broker_deny_user.go b/extkafka/broker_deny_user.go deleted file mode 100644 index d7c35e3..0000000 --- a/extkafka/broker_deny_user.go +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2023 Steadybit GmbH - -package extkafka - -import ( - "context" - "errors" - "fmt" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - 
"github.com/twmb/franz-go/pkg/kadm" - "strings" -) - -type KafkaConsumerDenyAccessAttack struct{} - -type KafkaDenyUserState struct { - ConsumerGroup string - Topic string - User string - BrokerHosts []string -} - -var _ action_kit_sdk.Action[KafkaDenyUserState] = (*KafkaConsumerDenyAccessAttack)(nil) - -func NewKafkaConsumerDenyAccessAttack() action_kit_sdk.Action[KafkaDenyUserState] { - return &KafkaConsumerDenyAccessAttack{} -} - -func (k *KafkaConsumerDenyAccessAttack) NewEmptyState() KafkaDenyUserState { - return KafkaDenyUserState{} -} - -func (k *KafkaConsumerDenyAccessAttack) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.deny-access", kafkaConsumerTargetId), - Label: "Deny Access", - Description: "Deny access to a topic for one or many consumer groups on all kafka hosts", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaConsumerTargetId, - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "consumer group name", - Description: extutil.Ptr("Find consumer group by name"), - Query: "kafka.consumer-group.name=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - TimeControl: action_kit_api.TimeControlExternal, - Kind: action_kit_api.Attack, - Parameters: []action_kit_api.ActionParameter{ - { - Label: "Duration", - Description: extutil.Ptr("The duration of the action. The broker configuration will be reverted at the end of the action."), - Name: "duration", - Type: action_kit_api.ActionParameterTypeDuration, - DefaultValue: extutil.Ptr("180s"), - Required: extutil.Ptr(true), - }, - { - Label: "User", - Description: extutil.Ptr("The user affected by the ACL."), - Name: "user", - Type: action_kit_api.ActionParameterTypeString, - Required: extutil.Ptr(true), - }, - { - Label: "Topic to deny access", - Name: "topic", - Description: extutil.Ptr("One topic to deny access to"), - Type: action_kit_api.ActionParameterTypeString, - Required: extutil.Ptr(true), - Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ParameterOptionsFromTargetAttribute{ - Attribute: "kafka.consumer-group.topics", - }, - }), - }, - }, - } -} - -func (k *KafkaConsumerDenyAccessAttack) Prepare(_ context.Context, state *KafkaDenyUserState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - if len(request.Target.Attributes["kafka.consumer-group.name"]) == 0 { - return nil, fmt.Errorf("the target is missing the kafka.consumer-group.name attribute") - } - state.ConsumerGroup = request.Target.Attributes["kafka.consumer-group.name"][0] - state.Topic = extutil.ToString(request.Config["topic"]) - state.User = extutil.ToString(request.Config["user"]) - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - - return nil, nil -} - -func (k *KafkaConsumerDenyAccessAttack) Start(ctx context.Context, state *KafkaDenyUserState) (*action_kit_api.StartResult, error) { - client, err := createNewAdminClient(state.BrokerHosts) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - acl := kadm.NewACLs(). - ResourcePatternType(kadm.ACLPatternLiteral). - Topics(state.Topic). - Groups(state.ConsumerGroup). - Operations(kadm.OpRead, kadm.OpWrite, kadm.OpDescribe). 
- Deny("User:" + state.User).DenyHosts() - - results, err := client.CreateACLs(ctx, acl) - if err != nil { - return nil, err - } - var errs []error - for _, result := range results { - if result.Err != nil { - detailedError := errors.New(result.Err.Error() + result.ErrMessage) - errs = append(errs, detailedError) - } - } - if len(errs) > 0 { - return nil, errors.Join(errs...) - } - - return &action_kit_api.StartResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Deny access for consumer group %s for every broker hosts", state.ConsumerGroup), - }}, - }, nil - -} - -func (k *KafkaConsumerDenyAccessAttack) Stop(ctx context.Context, state *KafkaDenyUserState) (*action_kit_api.StopResult, error) { - client, err := createNewAdminClient(state.BrokerHosts) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - acl := kadm.NewACLs(). - ResourcePatternType(kadm.ACLPatternLiteral). - Topics(state.Topic). - Groups(state.ConsumerGroup). - Operations(kadm.OpRead, kadm.OpWrite, kadm.OpDescribe). - Deny("User:" + state.User).DenyHosts() - - results, err := client.DeleteACLs(ctx, acl) - if err != nil { - return nil, err - } - var errs []error - for _, result := range results { - if result.Err != nil { - detailedError := errors.New(result.Err.Error() + result.ErrMessage) - errs = append(errs, detailedError) - } - } - if len(errs) > 0 { - return nil, errors.Join(errs...) - } - - return nil, nil -} diff --git a/extkafka/broker_discovery.go b/extkafka/broker_discovery.go deleted file mode 100644 index 1937ecb..0000000 --- a/extkafka/broker_discovery.go +++ /dev/null @@ -1,265 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2024 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/rs/zerolog/log" - "github.com/steadybit/discovery-kit/go/discovery_kit_api" - "github.com/steadybit/discovery-kit/go/discovery_kit_commons" - "github.com/steadybit/discovery-kit/go/discovery_kit_sdk" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kadm" - "strconv" - "strings" - "time" -) - -type kafkaBrokerDiscovery struct { -} - -var ( - _ discovery_kit_sdk.TargetDescriber = (*kafkaBrokerDiscovery)(nil) - _ discovery_kit_sdk.AttributeDescriber = (*kafkaBrokerDiscovery)(nil) - _ discovery_kit_sdk.EnrichmentRulesDescriber = (*kafkaBrokerDiscovery)(nil) -) - -func NewKafkaBrokerDiscovery(ctx context.Context) discovery_kit_sdk.TargetDiscovery { - discovery := &kafkaBrokerDiscovery{} - return discovery_kit_sdk.NewCachedTargetDiscovery(discovery, - discovery_kit_sdk.WithRefreshTargetsNow(), - discovery_kit_sdk.WithRefreshTargetsInterval(ctx, time.Duration(config.Config.DiscoveryIntervalKafkaBroker)*time.Second), - ) -} - -func (r *kafkaBrokerDiscovery) Describe() discovery_kit_api.DiscoveryDescription { - return discovery_kit_api.DiscoveryDescription{ - Id: kafkaBrokerTargetId, - Discover: discovery_kit_api.DescribingEndpointReferenceWithCallInterval{ - CallInterval: extutil.Ptr(fmt.Sprintf("%ds", config.Config.DiscoveryIntervalKafkaBroker)), - }, - } -} - -func (r *kafkaBrokerDiscovery) DescribeTarget() discovery_kit_api.TargetDescription { - return discovery_kit_api.TargetDescription{ - Id: kafkaBrokerTargetId, - Label: discovery_kit_api.PluralLabel{One: "Kafka broker", Other: "Kafka brokers"}, - Category: extutil.Ptr("kafka"), - 
Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - Table: discovery_kit_api.Table{ - Columns: []discovery_kit_api.Column{ - {Attribute: "steadybit.label"}, - {Attribute: "kafka.broker.cluster-name"}, - {Attribute: "kafka.broker.node-id"}, - {Attribute: "kafka.broker.is-controller"}, - {Attribute: "kafka.broker.host"}, - {Attribute: "kafka.broker.port"}, - }, - OrderBy: []discovery_kit_api.OrderBy{ - { - Attribute: "steadybit.label", - Direction: "ASC", - }, - }, - }, - } -} - -func (r *kafkaBrokerDiscovery) DescribeEnrichmentRules() []discovery_kit_api.TargetEnrichmentRule { - return []discovery_kit_api.TargetEnrichmentRule{ - getBrokerToPodEnrichmentRule(), - getBrokerToContainerEnrichmentRule(), - } -} - -func getBrokerToPodEnrichmentRule() discovery_kit_api.TargetEnrichmentRule { - return discovery_kit_api.TargetEnrichmentRule{ - Id: "com.steadybit.extension_kafka.kafka-broker-to-pod", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Src: discovery_kit_api.SourceOrDestination{ - Type: kafkaBrokerTargetId, - Selector: map[string]string{ - "kafka.pod.name": "${dest.k8s.pod.name}", - "kafka.pod.namespace": "${dest.k8s.namespace}", - }, - }, - Dest: discovery_kit_api.SourceOrDestination{ - Type: "com.steadybit.extension_kubernetes.kubernetes-pod", - Selector: map[string]string{ - "k8s.pod.name": "${src.kafka.pod.name}", - "k8s.namespace": "${src.kafka.pod.namespace}", - }, - }, - Attributes: []discovery_kit_api.Attribute{ - { - Matcher: discovery_kit_api.Equals, - Name: "kafka.broker.node-id", - }, - { - Matcher: discovery_kit_api.Equals, - Name: "kafka.broker.is-controller", - }, - }, - } -} - -func getBrokerToContainerEnrichmentRule() discovery_kit_api.TargetEnrichmentRule { - return discovery_kit_api.TargetEnrichmentRule{ - Id: "com.steadybit.extension_kafka.kafka-broker-to-container", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Src: discovery_kit_api.SourceOrDestination{ - Type: kafkaBrokerTargetId, - Selector: map[string]string{ - "kafka.pod.name": "${dest.k8s.pod.name}", - "kafka.pod.namespace": "${dest.k8s.namespace}", - }, - }, - Dest: discovery_kit_api.SourceOrDestination{ - Type: "com.steadybit.extension_container.container", - Selector: map[string]string{ - "k8s.pod.name": "${src.kafka.pod.name}", - "k8s.namespace": "${src.kafka.pod.namespace}", - }, - }, - Attributes: []discovery_kit_api.Attribute{ - { - Matcher: discovery_kit_api.Equals, - Name: "kafka.broker.node-id", - }, - { - Matcher: discovery_kit_api.Equals, - Name: "kafka.broker.is-controller", - }, - }, - } -} - -func (r *kafkaBrokerDiscovery) DescribeAttributes() []discovery_kit_api.AttributeDescription { - return []discovery_kit_api.AttributeDescription{ - { - Attribute: "kafka.cluster.name", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka cluster name", - Other: "Kafka cluster names", - }, - }, - { - Attribute: "kafka.broker.node-id", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka broker node id", - Other: "Kafka broker node ids", - }, - }, - { - Attribute: "kafka.broker.is-controller", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka broker controller", - Other: "Kafka broker controller", - }, - }, - { - Attribute: "kafka.broker.host", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka broker host", - Other: "Kafka broker hosts", - }, - }, - { - Attribute: "kafka.broker.port", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka broker port", - Other: "Kafka broker ports", - }, - }, - { - Attribute: "kafka.broker.rack", - Label: 
discovery_kit_api.PluralLabel{ - One: "Kafka broker rack", - Other: "Kafka broker racks", - }, - }, - { - Attribute: "kafka.pod.name", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka pod name", - Other: "Kafka pod names", - }, - }, - { - Attribute: "kafka.pod.namespace", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka pod namespace", - Other: "Kafka pod namespaces", - }, - }, - } -} - -func (r *kafkaBrokerDiscovery) DiscoverTargets(ctx context.Context) ([]discovery_kit_api.Target, error) { - return getAllBrokers(ctx) -} - -func getAllBrokers(ctx context.Context) ([]discovery_kit_api.Target, error) { - result := make([]discovery_kit_api.Target, 0, 20) - - client, err := createNewAdminClient(strings.Split(config.Config.SeedBrokers, ",")) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - // Create topic "franz-go" if it doesn't exist already - brokerDetails, err := client.ListBrokers(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list brokers: %v", err) - } - metadata, err := client.BrokerMetadata(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get brokers metadata : %v", err) - } - log.Debug().Msgf("Number of brokers discovered: %d", len(brokerDetails)) - log.Debug().Msgf("Node IDs discovered: %v", brokerDetails.NodeIDs()) - for _, broker := range brokerDetails { - result = append(result, toBrokerTarget(broker, metadata.Controller, metadata.Cluster)) - } - - return discovery_kit_commons.ApplyAttributeExcludes(result, config.Config.DiscoveryAttributesExcludesBrokers), nil -} - -func toBrokerTarget(broker kadm.BrokerDetail, controller int32, clusterName string) discovery_kit_api.Target { - id := broker.Host + "-" + strconv.Itoa(int(broker.Port)) - label := broker.Host - - attributes := make(map[string][]string) - attributes["kafka.cluster.name"] = []string{clusterName} - attributes["kafka.broker.node-id"] = []string{fmt.Sprintf("%v", broker.NodeID)} - attributes["kafka.broker.is-controller"] = []string{"false"} - if broker.NodeID == controller { - attributes["kafka.broker.is-controller"] = []string{"true"} - } - attributes["kafka.broker.host"] = []string{label} - attributes["kafka.broker.port"] = []string{fmt.Sprintf("%v", broker.Port)} - if broker.Rack != nil { - attributes["kafka.broker.rack"] = []string{*broker.Rack} - } - if len(strings.Split(broker.Host, ".")) == 4 && strings.HasSuffix(broker.Host, ".svc") { - podName := strings.Split(broker.Host, ".")[0] - namespace := strings.Split(broker.Host, ".")[2] - - attributes["kafka.pod.name"] = []string{podName} - attributes["kafka.pod.namespace"] = []string{namespace} - } - - return discovery_kit_api.Target{ - Id: id, - Label: label, - TargetType: kafkaBrokerTargetId, - Attributes: attributes, - } -} diff --git a/extkafka/check_brokers.go b/extkafka/check_brokers.go deleted file mode 100644 index 457c33b..0000000 --- a/extkafka/check_brokers.go +++ /dev/null @@ -1,329 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2022 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/steadybit/extension-kafka/config" - "strings" - - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - - "slices" - "sort" - "time" -) - -type CheckBrokersAction struct{} - -type CheckBrokersState struct { - 
PreviousController int32 - BrokerNodes []int32 - End time.Time - ExpectedChanges []string - StateCheckMode string - StateCheckSuccess bool - BrokerHosts []string -} - -const ( - BrokerControllerChanged = "kafka controller changed" - BrokerDowntime = "kafka broker with downtime" -) - -// Make sure action implements all required interfaces -var ( - _ action_kit_sdk.Action[CheckBrokersState] = (*CheckBrokersAction)(nil) - _ action_kit_sdk.ActionWithStatus[CheckBrokersState] = (*CheckBrokersAction)(nil) -) - -func NewBrokersCheckAction() action_kit_sdk.Action[CheckBrokersState] { - return &CheckBrokersAction{} -} - -func (m *CheckBrokersAction) NewEmptyState() CheckBrokersState { - return CheckBrokersState{} -} - -func (m *CheckBrokersAction) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.check", kafkaBrokerTargetId), - Label: "Check Brokers", - Description: "Check activity of brokers.", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - Kind: action_kit_api.Check, - TimeControl: action_kit_api.TimeControlInternal, - Parameters: []action_kit_api.ActionParameter{ - { - Name: "duration", - Label: "Duration", - Description: extutil.Ptr(""), - Type: action_kit_api.ActionParameterTypeDuration, - DefaultValue: extutil.Ptr("30s"), - Required: extutil.Ptr(true), - }, - { - Name: "expectedChanges", - Label: "Expected Changes", - Description: extutil.Ptr(""), - Type: action_kit_api.ActionParameterTypeStringArray, - Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ExplicitParameterOption{ - Label: "New Controller Elected", - Value: BrokerControllerChanged, - }, - action_kit_api.ExplicitParameterOption{ - Label: "Broker downtime", - Value: BrokerDowntime, - }, - }), - Required: extutil.Ptr(false), - }, - { - Name: "changeCheckMode", - Label: "Change Check Mode", - Description: extutil.Ptr("How do we check the change of the broker?"), - Type: action_kit_api.ActionParameterTypeString, - DefaultValue: extutil.Ptr(stateCheckModeAllTheTime), - Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ExplicitParameterOption{ - Label: "All the time", - Value: stateCheckModeAllTheTime, - }, - action_kit_api.ExplicitParameterOption{ - Label: "At least once", - Value: stateCheckModeAtLeastOnce, - }, - }), - Required: extutil.Ptr(true), - }, - }, - Widgets: extutil.Ptr([]action_kit_api.Widget{ - action_kit_api.StateOverTimeWidget{ - Type: action_kit_api.ComSteadybitWidgetStateOverTime, - Title: "Kafka Broker Changes", - Identity: action_kit_api.StateOverTimeWidgetIdentityConfig{ - From: "metric.id", - }, - Label: action_kit_api.StateOverTimeWidgetLabelConfig{ - From: "metric.id", - }, - State: action_kit_api.StateOverTimeWidgetStateConfig{ - From: "state", - }, - Tooltip: action_kit_api.StateOverTimeWidgetTooltipConfig{ - From: "tooltip", - }, - Url: extutil.Ptr(action_kit_api.StateOverTimeWidgetUrlConfig{ - From: extutil.Ptr("url"), - }), - Value: extutil.Ptr(action_kit_api.StateOverTimeWidgetValueConfig{ - Hide: extutil.Ptr(true), - }), - }, - }), - Status: extutil.Ptr(action_kit_api.MutatingEndpointReferenceWithCallInterval{ - CallInterval: extutil.Ptr("2s"), - }), - } -} - -func (m *CheckBrokersAction) Prepare(ctx context.Context, state *CheckBrokersState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - duration := request.Config["duration"].(float64) - end := 
time.Now().Add(time.Millisecond * time.Duration(duration)) - - var expectedState []string - if request.Config["expectedChanges"] != nil { - expectedState = extutil.ToStringArray(request.Config["expectedChanges"]) - } - - var stateCheckMode string - if request.Config["changeCheckMode"] != nil { - stateCheckMode = fmt.Sprintf("%v", request.Config["changeCheckMode"]) - } - - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - - client, err := createNewAdminClient(state.BrokerHosts) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - metadata, err := client.BrokerMetadata(ctx) - if err != nil { - return nil, extutil.Ptr(extension_kit.ToError(fmt.Sprintf("Failed to retrieve brokers from Kafka. Full response: %v", err), err)) - } - - state.End = end - state.ExpectedChanges = expectedState - state.StateCheckMode = stateCheckMode - state.StateCheckSuccess = false - state.PreviousController = metadata.Controller - state.BrokerNodes = metadata.Brokers.NodeIDs() - - return nil, nil -} - -func (m *CheckBrokersAction) Start(_ context.Context, _ *CheckBrokersState) (*action_kit_api.StartResult, error) { - return nil, nil -} - -func (m *CheckBrokersAction) Status(ctx context.Context, state *CheckBrokersState) (*action_kit_api.StatusResult, error) { - return BrokerCheckStatus(ctx, state) -} - -func BrokerCheckStatus(ctx context.Context, state *CheckBrokersState) (*action_kit_api.StatusResult, error) { - now := time.Now() - - client, err := createNewAdminClient(state.BrokerHosts) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - metadata, err := client.BrokerMetadata(ctx) - if err != nil { - return nil, extutil.Ptr(extension_kit.ToError(fmt.Sprintf("Failed to retrieve brokers from Kafka. 
Full response: %v", err), err)) - } - - // Check for changes - changes := make(map[string][]int32) - - if metadata.Controller != state.PreviousController { - state.PreviousController = metadata.Controller - changes[BrokerControllerChanged] = []int32{metadata.Controller} - } - - if !areSlicesEqualUnordered(state.BrokerNodes, metadata.Brokers.NodeIDs()) { - changes[BrokerDowntime] = findMissingElements(state.BrokerNodes, metadata.Brokers.NodeIDs()) - } - - completed := now.After(state.End) - var checkError *action_kit_api.ActionKitError - - keys := make([]string, 0, len(changes)) - for k := range changes { - keys = append(keys, k) - } - - if len(state.ExpectedChanges) > 0 { - if state.StateCheckMode == stateCheckModeAllTheTime { - for _, c := range keys { - if !slices.Contains(state.ExpectedChanges, c) { - checkError = extutil.Ptr(action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Brokers got an unexpected change '%s' whereas '%s' is expected.", - c, - state.ExpectedChanges), - Status: extutil.Ptr(action_kit_api.Failed), - }) - } - } - } else if state.StateCheckMode == stateCheckModeAtLeastOnce { - for _, c := range keys { - if slices.Contains(state.ExpectedChanges, c) { - state.StateCheckSuccess = true - } - } - - if completed && !state.StateCheckSuccess { - checkError = extutil.Ptr(action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Brokers didn't get the expected changes '%s' at least once.", - state.ExpectedChanges), - Status: extutil.Ptr(action_kit_api.Failed), - }) - } - } - } - - metrics := []action_kit_api.Metric{ - *toBrokerChangeMetric(state.ExpectedChanges, keys, changes, now), - } - - return &action_kit_api.StatusResult{ - Completed: completed, - Error: checkError, - Metrics: extutil.Ptr(metrics), - }, nil -} - -func toBrokerChangeMetric(expectedChanges []string, changesNames []string, changes map[string][]int32, now time.Time) *action_kit_api.Metric { - var tooltip string - var state string - - if len(changes) > 0 { - recap := "BROKER ACTIVITY" - for k, v := range changes { - recap += "\n" + k + ":\n" - for _, nodeID := range v { - recap += fmt.Sprint(nodeID) + "\n" - } - - } - - tooltip = recap - - sort.Strings(expectedChanges) - sort.Strings(changesNames) - - state = "warn" - for _, change := range changesNames { - if slices.Contains(expectedChanges, change) { - state = "success" - } - } - } else { - tooltip = "No changes" - state = "info" - } - - return extutil.Ptr(action_kit_api.Metric{ - Name: extutil.Ptr("kafka_consumer_group_state"), - Metric: map[string]string{ - "metric.id": fmt.Sprintf("Expected: %s", strings.Join(expectedChanges, ",")), - "url": "", - "state": state, - "tooltip": tooltip, - }, - Timestamp: now, - Value: 0, - }) -} - -func areSlicesEqualUnordered(slice1, slice2 []int32) bool { - if len(slice1) != len(slice2) { - return false - } - sorted1 := slices.Clone(slice1) // Create a copy to avoid modifying the original slices - sorted2 := slices.Clone(slice2) - sort.Slice(sorted1, func(i, j int) bool { return sorted1[i] < sorted1[j] }) - sort.Slice(sorted2, func(i, j int) bool { return sorted2[i] < sorted2[j] }) - return slices.Equal(sorted1, sorted2) -} - -func findMissingElements(slice1, slice2 []int32) []int32 { - // Create a map to store elements in slice2 - elementMap := make(map[int32]bool) - for _, v := range slice2 { - elementMap[v] = true - } - - // Find elements in slice1 that are not in slice2 - var missing []int32 - for _, v := range slice1 { - if !elementMap[v] { - missing = append(missing, v) - } - } - - return missing -} diff --git 
a/extkafka/check_brokers_test.go b/extkafka/check_brokers_test.go deleted file mode 100644 index 816fe0e..0000000 --- a/extkafka/check_brokers_test.go +++ /dev/null @@ -1,206 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/google/uuid" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/twmb/franz-go/pkg/kfake" - "github.com/twmb/franz-go/pkg/kgo" - "strings" - "testing" - "time" -) - -func TestCheckBrokers_Describe(t *testing.T) { - //Given - action := CheckBrokersAction{} - - //When - response := action.Describe() - - //Then - assert.Equal(t, "Check activity of brokers.", response.Description) - assert.Equal(t, "Check Brokers", response.Label) - assert.Equal(t, fmt.Sprintf("%s.check", kafkaBrokerTargetId), response.Id) - assert.Equal(t, extutil.Ptr("Kafka"), response.Technology) -} - -func TestCheckBrokers_Prepare(t *testing.T) { - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(3), - ) - require.NoError(t, err) - defer c.Close() - - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - - tests := []struct { - name string - requestBody action_kit_api.PrepareActionRequestBody - wantedError error - wantedState *CheckBrokersState - }{ - { - name: "Should return config", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Config: map[string]interface{}{ - "expectedChanges": []string{"test"}, - "changeCheckMode": "allTheTime", - "duration": 10000, - }, - ExecutionId: uuid.New(), - }), - - wantedState: &CheckBrokersState{ - ExpectedChanges: []string{"test"}, - StateCheckMode: "allTheTime", - StateCheckSuccess: false, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - action := CheckBrokersAction{} - state := CheckBrokersState{} - request := tt.requestBody - //When - _, err := action.Prepare(context.TODO(), &state, request) - - //Then - if tt.wantedError != nil { - assert.EqualError(t, err, tt.wantedError.Error()) - } - if tt.wantedState != nil { - assert.NoError(t, err) - assert.Equal(t, tt.wantedState.ExpectedChanges, state.ExpectedChanges) - assert.Equal(t, tt.wantedState.StateCheckMode, state.StateCheckMode) - assert.Equal(t, tt.wantedState.StateCheckSuccess, state.StateCheckSuccess) - assert.NotNil(t, state.End) - } - }) - } -} - -func TestCheckBrokers_Status(t *testing.T) { - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(3), - ) - require.NoError(t, err) - defer c.Close() - - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - // One client can both produce and consume! - // Consuming can either be direct (no consumer group), or through a group. Below, we use a group. 
- cl, err := kgo.NewClient( - kgo.SeedBrokers(seeds...), - kgo.ConsumerGroup("steadybit"), - kgo.ConsumeTopics("steadybit"), - ) - require.NoError(t, err) - defer cl.Close() - - tests := []struct { - name string - killNode *int - requestBody action_kit_api.PrepareActionRequestBody - wantedError error - wantedState *CheckBrokersState - }{ - { - name: "Should return status ok", - killNode: extutil.Ptr(1), - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.consumer-group.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "expectedChanges": []string{"kafka broker with downtime"}, - "changeCheckMode": "atLeastOnce", - "duration": 5000, - }, - ExecutionId: uuid.New(), - }), - - wantedState: &CheckBrokersState{ - StateCheckMode: "atLeastOnce", - StateCheckSuccess: true, - }, - }, - { - name: "Should return status ok with all the time check mode", - killNode: extutil.Ptr(2), - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.consumer-group.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "expectedChanges": []string{"Dead"}, - "changeCheckMode": "allTheTime", - "duration": 5000, - }, - ExecutionId: uuid.New(), - }), - - wantedState: &CheckBrokersState{ - ExpectedChanges: []string{"kafka broker with downtime"}, - StateCheckMode: "allTheTime", - StateCheckSuccess: false, - BrokerNodes: []int32{1, 2, 3}, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - action := CheckBrokersAction{} - state := CheckBrokersState{} - request := tt.requestBody - //When - _, errPrepare := action.Prepare(t.Context(), &state, request) - statusResult, errStatus := action.Status(t.Context(), &state) - - //Then - if tt.wantedState != nil { - assert.NoError(t, errPrepare) - assert.NoError(t, errStatus) - assert.Equal(t, tt.wantedState.StateCheckMode, state.StateCheckMode) - assert.False(t, statusResult.Completed) - assert.NotNil(t, state.End) - } - - if tt.wantedError != nil { - err := c.RemoveNode(int32(*tt.killNode)) - require.NoError(t, err) - } - time.Sleep(6 * time.Second) - - // Completed - statusResult, errStatus = action.Status(t.Context(), &state) - //Then - if tt.wantedState != nil { - assert.NoError(t, errPrepare) - assert.NoError(t, errStatus) - assert.Equal(t, tt.wantedState.StateCheckMode, state.StateCheckMode) - assert.True(t, statusResult.Completed) - assert.NotNil(t, state.End) - } - }) - } -} diff --git a/extkafka/check_consumer_group.go b/extkafka/check_consumer_group.go deleted file mode 100644 index fe87331..0000000 --- a/extkafka/check_consumer_group.go +++ /dev/null @@ -1,285 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kadm" - "slices" - "strings" - "time" -) - -type ConsumerGroupCheckAction struct{} - -type ConsumerGroupCheckState struct { - ConsumerGroupName string - TopicName string - End time.Time - ExpectedState []string - StateCheckMode string - StateCheckSuccess 
bool - BrokerHosts []string -} - -// Make sure action implements all required interfaces -var ( - _ action_kit_sdk.Action[ConsumerGroupCheckState] = (*ConsumerGroupCheckAction)(nil) - _ action_kit_sdk.ActionWithStatus[ConsumerGroupCheckState] = (*ConsumerGroupCheckAction)(nil) -) - -func NewConsumerGroupCheckAction() action_kit_sdk.Action[ConsumerGroupCheckState] { - return &ConsumerGroupCheckAction{} -} - -func (m *ConsumerGroupCheckAction) NewEmptyState() ConsumerGroupCheckState { - return ConsumerGroupCheckState{} -} - -func (m *ConsumerGroupCheckAction) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.check", kafkaConsumerTargetId), - Label: "Check Consumer State", - Description: "Check the consumer state", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaConsumerTargetId, - QuantityRestriction: extutil.Ptr(action_kit_api.QuantityRestrictionAll), - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "consumer group name", - Description: extutil.Ptr("Find consumer group by name"), - Query: "kafka.consumer-group.name=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - Kind: action_kit_api.Check, - TimeControl: action_kit_api.TimeControlInternal, - Parameters: []action_kit_api.ActionParameter{ - { - Name: "duration", - Label: "Duration", - Description: extutil.Ptr(""), - Type: action_kit_api.ActionParameterTypeDuration, - DefaultValue: extutil.Ptr("30s"), - Required: extutil.Ptr(true), - }, - { - Name: "expectedStateList", - Label: "Expected State List", - Description: extutil.Ptr(""), - Type: action_kit_api.ActionParameterTypeStringArray, - Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ExplicitParameterOption{ - Label: "Unknown", - Value: "Unknown", - }, - action_kit_api.ExplicitParameterOption{ - Label: "PreparingRebalance", - Value: "PreparingRebalance", - }, - action_kit_api.ExplicitParameterOption{ - Label: "CompletingRebalance", - Value: "CompletingRebalance", - }, - action_kit_api.ExplicitParameterOption{ - Label: "Stable", - Value: "Stable", - }, - action_kit_api.ExplicitParameterOption{ - Label: "Dead", - Value: "Dead", - }, - action_kit_api.ExplicitParameterOption{ - Label: "Empty", - Value: "Empty", - }, - }), - Required: extutil.Ptr(false), - }, - { - Name: "stateCheckMode", - Label: "State Check Mode", - Description: extutil.Ptr("How often should the state be checked ?"), - Type: action_kit_api.ActionParameterTypeString, - DefaultValue: extutil.Ptr(stateCheckModeAllTheTime), - Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ExplicitParameterOption{ - Label: "All the time", - Value: stateCheckModeAllTheTime, - }, - action_kit_api.ExplicitParameterOption{ - Label: "At least once", - Value: stateCheckModeAtLeastOnce, - }, - }), - Required: extutil.Ptr(true), - }, - }, - Widgets: extutil.Ptr([]action_kit_api.Widget{ - action_kit_api.StateOverTimeWidget{ - Type: action_kit_api.ComSteadybitWidgetStateOverTime, - Title: "Kafka Consumer Group State", - Identity: action_kit_api.StateOverTimeWidgetIdentityConfig{ - From: "kafka.consumer-group.name", - }, - Label: action_kit_api.StateOverTimeWidgetLabelConfig{ - From: "kafka.consumer-group.name", - }, - State: action_kit_api.StateOverTimeWidgetStateConfig{ - From: "state", - }, - Tooltip: 
action_kit_api.StateOverTimeWidgetTooltipConfig{ - From: "tooltip", - }, - Url: extutil.Ptr(action_kit_api.StateOverTimeWidgetUrlConfig{ - From: extutil.Ptr("url"), - }), - Value: extutil.Ptr(action_kit_api.StateOverTimeWidgetValueConfig{ - Hide: extutil.Ptr(true), - }), - }, - }), - Status: extutil.Ptr(action_kit_api.MutatingEndpointReferenceWithCallInterval{ - CallInterval: extutil.Ptr("1s"), - }), - } -} - -func (m *ConsumerGroupCheckAction) Prepare(_ context.Context, state *ConsumerGroupCheckState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - if len(request.Target.Attributes["kafka.consumer-group.name"]) == 0 { - return nil, fmt.Errorf("the target is missing the kafka.consumer-group.name attribute") - } - - duration := request.Config["duration"].(float64) - end := time.Now().Add(time.Millisecond * time.Duration(duration)) - - var expectedState []string - if request.Config["expectedStateList"] != nil { - expectedState = extutil.ToStringArray(request.Config["expectedStateList"]) - } - - var stateCheckMode string - if request.Config["stateCheckMode"] != nil { - stateCheckMode = fmt.Sprintf("%v", request.Config["stateCheckMode"]) - } - - state.ConsumerGroupName = request.Target.Attributes["kafka.consumer-group.name"][0] - state.End = end - state.ExpectedState = expectedState - state.StateCheckMode = stateCheckMode - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - - return nil, nil -} - -func (m *ConsumerGroupCheckAction) Start(_ context.Context, _ *ConsumerGroupCheckState) (*action_kit_api.StartResult, error) { - return nil, nil -} - -func (m *ConsumerGroupCheckAction) Status(ctx context.Context, state *ConsumerGroupCheckState) (*action_kit_api.StatusResult, error) { - return ConsumerGroupCheckStatus(ctx, state) -} - -func ConsumerGroupCheckStatus(ctx context.Context, state *ConsumerGroupCheckState) (*action_kit_api.StatusResult, error) { - now := time.Now() - - client, err := createNewAdminClient(state.BrokerHosts) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - groups, err := client.DescribeGroups(ctx, state.ConsumerGroupName) - if err != nil { - return nil, extutil.Ptr(extension_kit.ToError(fmt.Sprintf("Failed to retrieve consumer groups from Kafka for name %s. 
Full response: %v", state.ConsumerGroupName, err), err)) - } - - var group kadm.DescribedGroup - if len(groups.Sorted()) == 0 { - log.Err(err).Msgf("No consumer group with that name %s.", state.ConsumerGroupName) - } else if len(groups.Sorted()) > 1 { - log.Err(err).Msgf("More than 1 consumer group with that name %s.", state.ConsumerGroupName) - } else { - group = groups.Sorted()[0] - } - - completed := now.After(state.End) - var checkError *action_kit_api.ActionKitError - - if len(state.ExpectedState) > 0 { - if state.StateCheckMode == stateCheckModeAllTheTime { - if !slices.Contains(state.ExpectedState, group.State) { - checkError = extutil.Ptr(action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Consumer Group '%s' has state '%s' whereas '%s' is expected.", - group.Group, - group.State, - state.ExpectedState), - Status: extutil.Ptr(action_kit_api.Failed), - }) - } - } else if state.StateCheckMode == stateCheckModeAtLeastOnce { - if slices.Contains(state.ExpectedState, group.State) { - state.StateCheckSuccess = true - } - if completed && !state.StateCheckSuccess { - checkError = extutil.Ptr(action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Consumer Group '%s' didn't have status '%s' at least once.", - group.Group, - state.ExpectedState), - Status: extutil.Ptr(action_kit_api.Failed), - }) - } - } - } - - metrics := []action_kit_api.Metric{ - *toConsumerGroupMetric(group, now), - } - - return &action_kit_api.StatusResult{ - Completed: completed, - Error: checkError, - Metrics: extutil.Ptr(metrics), - }, nil -} - -func toConsumerGroupMetric(group kadm.DescribedGroup, now time.Time) *action_kit_api.Metric { - var tooltip string - var state string - - tooltip = fmt.Sprintf("Consumer group state is: %s", group.State) - if group.State == "Stable" { - state = "success" - } else if group.State == "Empty" { - state = "warn" - } else if group.State == "PreparingRebalance" { - state = "warn" - } else if group.State == "Dead" { - state = "danger" - } - - return extutil.Ptr(action_kit_api.Metric{ - Name: extutil.Ptr("kafka_consumer_group_state"), - Metric: map[string]string{ - "kafka.consumer-group.name": group.Group, - "url": "", - "state": state, - "tooltip": tooltip, - }, - Timestamp: now, - Value: 0, - }) -} diff --git a/extkafka/check_consumer_group_test.go b/extkafka/check_consumer_group_test.go deleted file mode 100644 index 889c78d..0000000 --- a/extkafka/check_consumer_group_test.go +++ /dev/null @@ -1,221 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "fmt" - "github.com/google/uuid" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/extension-kafka/config" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/twmb/franz-go/pkg/kfake" - "github.com/twmb/franz-go/pkg/kgo" - "strings" - "testing" - "time" -) - -func TestCheckConsumerGroup_Describe(t *testing.T) { - //Given - action := ConsumerGroupCheckAction{} - - //When - response := action.Describe() - - //Then - assert.Equal(t, "Check the consumer state", response.Description) - assert.Equal(t, "Check Consumer State", response.Label) - assert.Equal(t, kafkaConsumerTargetId, response.TargetSelection.TargetType) - assert.Equal(t, fmt.Sprintf("%s.check", kafkaConsumerTargetId), response.Id) - assert.Equal(t, extutil.Ptr("Kafka"), response.Technology) -} - -func TestCheckConsumerGroup_Prepare(t 
*testing.T) { - tests := []struct { - name string - requestBody action_kit_api.PrepareActionRequestBody - wantedError error - wantedState *ConsumerGroupCheckState - }{ - { - name: "Should return config", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.consumer-group.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "expectedStateList": []string{"test"}, - "stateCheckMode": "test", - "duration": 10000, - }, - ExecutionId: uuid.New(), - }), - - wantedState: &ConsumerGroupCheckState{ - ConsumerGroupName: "steadybit", - ExpectedState: []string{"test"}, - StateCheckMode: "test", - StateCheckSuccess: true, - TopicName: "steadybit", - }, - }, - { - name: "Should return error for consumer group name", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{}, - }, - Config: map[string]interface{}{ - "expectedStateList": []string{"test"}, - "stateCheckMode": "test", - "duration": 10000, - }, - ExecutionId: uuid.New(), - }), - - wantedError: extension_kit.ToError("the target is missing the kafka.consumer-group.name attribute", nil), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - action := ConsumerGroupCheckAction{} - state := ConsumerGroupCheckState{} - request := tt.requestBody - - //When - _, err := action.Prepare(t.Context(), &state, request) - - //Then - if tt.wantedError != nil { - assert.EqualError(t, err, tt.wantedError.Error()) - } - if tt.wantedState != nil { - assert.NoError(t, err) - assert.Equal(t, tt.wantedState.StateCheckMode, state.StateCheckMode) - assert.Equal(t, tt.wantedState.ConsumerGroupName, state.ConsumerGroupName) - assert.Equal(t, tt.wantedState.ExpectedState, state.ExpectedState) - assert.False(t, state.StateCheckSuccess) - assert.NotNil(t, state.End) - } - }) - } -} - -func TestCheckConsumerGroup_Status(t *testing.T) { - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(3), - ) - require.NoError(t, err) - defer c.Close() - - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - // One client can both produce and consume! - // Consuming can either be direct (no consumer group), or through a group. Below, we use a group. 
- cl, err := kgo.NewClient( - kgo.SeedBrokers(seeds...), - kgo.ConsumerGroup("steadybit"), - kgo.ConsumeTopics("steadybit"), - ) - require.NoError(t, err) - defer cl.Close() - - tests := []struct { - name string - requestBody action_kit_api.PrepareActionRequestBody - wantedError error - wantedState *ConsumerGroupCheckState - }{ - { - name: "Should return status ok", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.consumer-group.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "expectedStateList": []string{"Dead"}, - "stateCheckMode": "atLeastOnce", - "duration": 5000, - }, - ExecutionId: uuid.New(), - }), - wantedState: &ConsumerGroupCheckState{ - ConsumerGroupName: "steadybit", - ExpectedState: []string{"Dead"}, - StateCheckMode: "atLeastOnce", - StateCheckSuccess: true, - TopicName: "steadybit", - }, - }, - { - name: "Should return status ok with all the time check mode", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.consumer-group.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "expectedStateList": []string{"Dead"}, - "stateCheckMode": "allTheTime", - "duration": 5000, - }, - ExecutionId: uuid.New(), - }), - wantedState: &ConsumerGroupCheckState{ - ConsumerGroupName: "steadybit", - ExpectedState: []string{"Dead"}, - StateCheckMode: "allTheTime", - StateCheckSuccess: false, - TopicName: "steadybit", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - action := ConsumerGroupCheckAction{} - state := ConsumerGroupCheckState{} - request := tt.requestBody - - //When - _, errPrepare := action.Prepare(t.Context(), &state, request) - statusResult, errStatus := action.Status(t.Context(), &state) - - //Then - if tt.wantedState != nil { - assert.NoError(t, errPrepare) - assert.NoError(t, errStatus) - assert.Equal(t, tt.wantedState.StateCheckMode, state.StateCheckMode) - assert.Equal(t, tt.wantedState.ConsumerGroupName, state.ConsumerGroupName) - assert.False(t, statusResult.Completed) - assert.NotNil(t, state.End) - } - - time.Sleep(6 * time.Second) - - // Completed - statusResult, errStatus = action.Status(t.Context(), &state) - - //Then - if tt.wantedState != nil { - assert.NoError(t, errPrepare) - assert.NoError(t, errStatus) - assert.Equal(t, tt.wantedState.StateCheckMode, state.StateCheckMode) - assert.Equal(t, tt.wantedState.ConsumerGroupName, state.ConsumerGroupName) - assert.True(t, statusResult.Completed) - assert.NotNil(t, state.End) - } - }) - } -} diff --git a/extkafka/check_partitions.go b/extkafka/check_partitions.go deleted file mode 100644 index cd17fbf..0000000 --- a/extkafka/check_partitions.go +++ /dev/null @@ -1,356 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kadm" - "slices" - "sort" - "strings" - "time" -) - -type PartitionsCheckAction struct{} - -type PartitionsCheckState struct { - TopicName string - PreviousReplicas 
map[int32][]int32 - PreviousInSyncReplicas map[int32][]int32 - PreviousOfflineReplicas map[int32][]int32 - PreviousLeader map[int32]int32 - End time.Time - ExpectedChanges []string - StateCheckMode string - StateCheckSuccess bool - BrokerHosts []string -} - -const ( - LeaderChanged = "leader changed" - ReplicasChanged = "replicas changed" - OfflineReplicasChanged = "offline replicas changed" - InSyncReplicasChanged = "in sync replicas changed" -) - -// Make sure action implements all required interfaces -var ( - _ action_kit_sdk.Action[PartitionsCheckState] = (*PartitionsCheckAction)(nil) - _ action_kit_sdk.ActionWithStatus[PartitionsCheckState] = (*PartitionsCheckAction)(nil) -) - -func NewPartitionsCheckAction() action_kit_sdk.Action[PartitionsCheckState] { - return &PartitionsCheckAction{} -} - -func (m *PartitionsCheckAction) NewEmptyState() PartitionsCheckState { - return PartitionsCheckState{} -} - -func (m *PartitionsCheckAction) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.check-partitions", kafkaTopicTargetId), - Label: "Check Partitions", - Description: "Check topic partitions changes for leader, in-sync-replicas, replicas and offline replicas", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaTopicTargetId, - QuantityRestriction: extutil.Ptr(action_kit_api.QuantityRestrictionAll), - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "topic name", - Description: extutil.Ptr("Find topic group by name"), - Query: "kafka.topic.name=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - Kind: action_kit_api.Check, - TimeControl: action_kit_api.TimeControlInternal, - Parameters: []action_kit_api.ActionParameter{ - { - Name: "duration", - Label: "Duration", - Description: extutil.Ptr(""), - Type: action_kit_api.ActionParameterTypeDuration, - DefaultValue: extutil.Ptr("30s"), - Required: extutil.Ptr(true), - }, - { - Name: "expectedChanges", - Label: "Expected Changes", - Description: extutil.Ptr(""), - Type: action_kit_api.ActionParameterTypeStringArray, - Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ExplicitParameterOption{ - Label: "New Leader Elected", - Value: LeaderChanged, - }, - action_kit_api.ExplicitParameterOption{ - Label: "Replicas Changed", - Value: ReplicasChanged, - }, - action_kit_api.ExplicitParameterOption{ - Label: "Offline Replicas Changed", - Value: OfflineReplicasChanged, - }, - action_kit_api.ExplicitParameterOption{ - Label: "In-Sync Replicas Changed", - Value: InSyncReplicasChanged, - }, - }), - Required: extutil.Ptr(false), - }, - { - Name: "changeCheckMode", - Label: "Change Check Mode", - Description: extutil.Ptr("How do we check the change of the topic?"), - Type: action_kit_api.ActionParameterTypeString, - DefaultValue: extutil.Ptr(stateCheckModeAllTheTime), - Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ExplicitParameterOption{ - Label: "All the time", - Value: stateCheckModeAllTheTime, - }, - action_kit_api.ExplicitParameterOption{ - Label: "At least once", - Value: stateCheckModeAtLeastOnce, - }, - }), - Required: extutil.Ptr(true), - }, - }, - Widgets: extutil.Ptr([]action_kit_api.Widget{ - action_kit_api.StateOverTimeWidget{ - Type: action_kit_api.ComSteadybitWidgetStateOverTime, - Title: "Kafka Topic Changes", - Identity: 
action_kit_api.StateOverTimeWidgetIdentityConfig{ - From: "kafka.topic.name", - }, - Label: action_kit_api.StateOverTimeWidgetLabelConfig{ - From: "kafka.topic.name", - }, - State: action_kit_api.StateOverTimeWidgetStateConfig{ - From: "state", - }, - Tooltip: action_kit_api.StateOverTimeWidgetTooltipConfig{ - From: "tooltip", - }, - Url: extutil.Ptr(action_kit_api.StateOverTimeWidgetUrlConfig{ - From: extutil.Ptr("url"), - }), - Value: extutil.Ptr(action_kit_api.StateOverTimeWidgetValueConfig{ - Hide: extutil.Ptr(true), - }), - }, - }), - Status: extutil.Ptr(action_kit_api.MutatingEndpointReferenceWithCallInterval{ - CallInterval: extutil.Ptr("2s"), - }), - } -} - -func (m *PartitionsCheckAction) Prepare(_ context.Context, state *PartitionsCheckState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - if len(request.Target.Attributes["kafka.topic.name"]) == 0 { - return nil, fmt.Errorf("the target is missing the kafka.topic.name attribute") - } - state.TopicName = extutil.MustHaveValue(request.Target.Attributes, "kafka.topic.name")[0] - - duration := request.Config["duration"].(float64) - end := time.Now().Add(time.Millisecond * time.Duration(duration)) - - var expectedState []string - if request.Config["expectedChanges"] != nil { - expectedState = extutil.ToStringArray(request.Config["expectedChanges"]) - } - - var stateCheckMode string - if request.Config["changeCheckMode"] != nil { - stateCheckMode = fmt.Sprintf("%v", request.Config["changeCheckMode"]) - } - - state.End = end - state.ExpectedChanges = expectedState - state.StateCheckMode = stateCheckMode - state.PreviousInSyncReplicas = make(map[int32][]int32) - state.PreviousReplicas = make(map[int32][]int32) - state.PreviousOfflineReplicas = make(map[int32][]int32) - state.PreviousLeader = make(map[int32]int32) - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - - return nil, nil -} - -func (m *PartitionsCheckAction) Start(_ context.Context, _ *PartitionsCheckState) (*action_kit_api.StartResult, error) { - return nil, nil -} - -func (m *PartitionsCheckAction) Status(ctx context.Context, state *PartitionsCheckState) (*action_kit_api.StatusResult, error) { - return TopicCheckStatus(ctx, state) -} - -func TopicCheckStatus(ctx context.Context, state *PartitionsCheckState) (*action_kit_api.StatusResult, error) { - now := time.Now() - - client, err := createNewAdminClient(state.BrokerHosts) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - topics, err := client.ListTopics(ctx, state.TopicName) - if err != nil { - return nil, extutil.Ptr(extension_kit.ToError(fmt.Sprintf("Failed to retrieve topics from Kafka for name %s. 
Full response: %v", state.TopicName, err), err)) - } - - var topicDetail kadm.TopicDetail - if len(topics.Sorted()) == 0 { - log.Err(err).Msgf("No topic with that name %s.", state.TopicName) - } else if len(topics.Sorted()) > 1 { - log.Err(err).Msgf("More than 1 topic with that name %s.", state.TopicName) - } else { - topicDetail = topics.Sorted()[0] - } - - completed := now.After(state.End) - var checkError *action_kit_api.ActionKitError - - // Check for changes - changes := make(map[string][]string) - for _, p := range topicDetail.Partitions.Sorted() { - if previousReplicas, ok := state.PreviousReplicas[p.Partition]; ok { - if !slices.Equal(previousReplicas, p.Replicas) { - changes[ReplicasChanged] = append(changes[ReplicasChanged], fmt.Sprintf("previous: %v,actual: %v", previousReplicas, p.Replicas)) - state.PreviousReplicas[p.Partition] = p.Replicas - } - } else { - state.PreviousReplicas[p.Partition] = p.Replicas - } - - if previousISR, ok := state.PreviousInSyncReplicas[p.Partition]; ok { - if !slices.Equal(previousISR, p.ISR) { - changes[InSyncReplicasChanged] = append(changes[InSyncReplicasChanged], fmt.Sprintf("previous: %v,actual: %v", previousISR, p.ISR)) - state.PreviousInSyncReplicas[p.Partition] = p.ISR - } - } else { - state.PreviousInSyncReplicas[p.Partition] = p.ISR - } - - if previousOffline, ok := state.PreviousOfflineReplicas[p.Partition]; ok { - if !slices.Equal(previousOffline, p.OfflineReplicas) { - changes[OfflineReplicasChanged] = append(changes[OfflineReplicasChanged], fmt.Sprintf("previous: %v,actual: %v", previousOffline, p.OfflineReplicas)) - state.PreviousOfflineReplicas[p.Partition] = p.OfflineReplicas - } - } else { - state.PreviousOfflineReplicas[p.Partition] = p.OfflineReplicas - } - - if previousLeader, ok := state.PreviousLeader[p.Partition]; ok { - if previousLeader != p.Leader { - changes[LeaderChanged] = append(changes[LeaderChanged], fmt.Sprintf("previous: %v,actual: %v", previousLeader, p.Leader)) - state.PreviousLeader[p.Partition] = p.Leader - } - } else { - state.PreviousLeader[p.Partition] = p.Leader - } - } - - keys := make([]string, 0, len(changes)) - for k := range changes { - keys = append(keys, k) - } - - if len(state.ExpectedChanges) > 0 { - if state.StateCheckMode == stateCheckModeAllTheTime { - for _, c := range keys { - if !slices.Contains(state.ExpectedChanges, c) { - checkError = extutil.Ptr(action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Topic '%s' has an unexpected change '%s' whereas '%s' is expected. 
Change(s) : %v", - state.TopicName, - c, - state.ExpectedChanges, - changes[c]), - Status: extutil.Ptr(action_kit_api.Failed), - }) - } - } - } else if state.StateCheckMode == stateCheckModeAtLeastOnce { - for _, c := range keys { - if slices.Contains(state.ExpectedChanges, c) { - state.StateCheckSuccess = true - } - } - - if completed && !state.StateCheckSuccess { - checkError = extutil.Ptr(action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Topic '%s' didn't have the expected changes '%s' at least once.", - state.TopicName, - state.ExpectedChanges), - Status: extutil.Ptr(action_kit_api.Failed), - }) - } - } - } - - metrics := []action_kit_api.Metric{ - *toTopicChangeMetric(state.TopicName, state.ExpectedChanges, keys, changes, now), - } - - return &action_kit_api.StatusResult{ - Completed: completed, - Error: checkError, - Metrics: extutil.Ptr(metrics), - }, nil -} - -func toTopicChangeMetric(topicName string, expectedChanges []string, changesNames []string, changes map[string][]string, now time.Time) *action_kit_api.Metric { - var tooltip string - var state string - - if len(changes) > 0 { - recap := "PARTITION ACTIVITY" - for k, v := range changes { - recap += "\n" + k + ":\n" - recap += strings.Join(v, "\n") - } - - tooltip = recap - - sort.Strings(expectedChanges) - sort.Strings(changesNames) - - for _, change := range changesNames { - if slices.Contains(expectedChanges, change) { - state = "success" - } else { - state = "danger" - } - } - } else { - tooltip = "No changes" - state = "info" - } - - return extutil.Ptr(action_kit_api.Metric{ - Name: extutil.Ptr("kafka_consumer_group_state"), - Metric: map[string]string{ - "kafka.topic.name": topicName, - "url": "", - "state": state, - "tooltip": tooltip, - }, - Timestamp: now, - Value: 0, - }) -} diff --git a/extkafka/check_topic_lag_for_consumer.go b/extkafka/check_topic_lag_for_consumer.go deleted file mode 100644 index 326cd23..0000000 --- a/extkafka/check_topic_lag_for_consumer.go +++ /dev/null @@ -1,247 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kadm" - "strconv" - "strings" - "time" -) - -type ConsumerGroupLagCheckAction struct{} - -type ConsumerGroupLagCheckState struct { - ConsumerGroupName string - Topic string - End time.Time - AcceptableLag int64 - StateCheckSuccess bool - StateCheckFailed bool - BrokerHosts []string -} - -// Make sure action implements all required interfaces -var ( - _ action_kit_sdk.Action[ConsumerGroupLagCheckState] = (*ConsumerGroupLagCheckAction)(nil) - _ action_kit_sdk.ActionWithStatus[ConsumerGroupLagCheckState] = (*ConsumerGroupLagCheckAction)(nil) -) - -func NewConsumerGroupLagCheckAction() action_kit_sdk.Action[ConsumerGroupLagCheckState] { - return &ConsumerGroupLagCheckAction{} -} - -func (m *ConsumerGroupLagCheckAction) NewEmptyState() ConsumerGroupLagCheckState { - return ConsumerGroupLagCheckState{} -} - -func (m *ConsumerGroupLagCheckAction) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.check-lag", kafkaConsumerTargetId), - Label: "Check Topic Lag", - Description: 
"Check the consumer lag for a given topic (lag is calculated by the difference between topic offset and consumer offset)", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaConsumerTargetId, - QuantityRestriction: extutil.Ptr(action_kit_api.QuantityRestrictionAll), - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "consumer group name", - Description: extutil.Ptr("Find consumer group by name"), - Query: "kafka.consumer-group.name=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - Kind: action_kit_api.Check, - TimeControl: action_kit_api.TimeControlInternal, - Parameters: []action_kit_api.ActionParameter{ - { - Name: "duration", - Label: "Duration", - Description: extutil.Ptr(""), - Type: action_kit_api.ActionParameterTypeDuration, - DefaultValue: extutil.Ptr("30s"), - Required: extutil.Ptr(true), - }, - { - Name: "topic", - Label: "Topic to track lag", - Description: extutil.Ptr("One topic to track lags"), - Type: action_kit_api.ActionParameterTypeString, - Required: extutil.Ptr(true), - Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ParameterOptionsFromTargetAttribute{ - Attribute: "kafka.consumer-group.topics", - }, - }), - }, - { - Name: "acceptableLag", - Label: "Lag alert threshold", - Description: extutil.Ptr("How much lag is acceptable for this topic"), - Type: action_kit_api.ActionParameterTypeInteger, - Required: extutil.Ptr(true), - DefaultValue: extutil.Ptr("10"), - }, - }, - Widgets: extutil.Ptr([]action_kit_api.Widget{ - action_kit_api.LineChartWidget{ - Type: action_kit_api.ComSteadybitWidgetLineChart, - Title: "Consumer Group Lag", - Identity: action_kit_api.LineChartWidgetIdentityConfig{ - MetricName: "kafka_consumer_group_lag", - From: "id", - Mode: action_kit_api.ComSteadybitWidgetLineChartIdentityModeSelect, - }, - Grouping: extutil.Ptr(action_kit_api.LineChartWidgetGroupingConfig{ - ShowSummary: extutil.Ptr(true), - Groups: []action_kit_api.LineChartWidgetGroup{ - { - Title: "Under Acceptable Lag", - Color: "success", - Matcher: action_kit_api.LineChartWidgetGroupMatcherFallback{ - Type: action_kit_api.ComSteadybitWidgetLineChartGroupMatcherFallback, - }, - }, - { - Title: "Lag Constraint Violated", - Color: "warn", - Matcher: action_kit_api.LineChartWidgetGroupMatcherKeyEqualsValue{ - Type: action_kit_api.ComSteadybitWidgetLineChartGroupMatcherKeyEqualsValue, - Key: "lag_constraints_fulfilled", - Value: "false", - }, - }, - }, - }), - Tooltip: extutil.Ptr(action_kit_api.LineChartWidgetTooltipConfig{ - MetricValueTitle: extutil.Ptr("Lag"), - AdditionalContent: []action_kit_api.LineChartWidgetTooltipContent{ - { - From: "consumer", - Title: "Consumer", - }, - { - From: "topic", - Title: "Topic", - }, - }, - }), - }, - }), - Status: extutil.Ptr(action_kit_api.MutatingEndpointReferenceWithCallInterval{ - CallInterval: extutil.Ptr("1s"), - }), - } -} - -func (m *ConsumerGroupLagCheckAction) Prepare(_ context.Context, state *ConsumerGroupLagCheckState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - if len(request.Target.Attributes["kafka.consumer-group.name"]) == 0 { - return nil, fmt.Errorf("the target is missing the kafka.consumer-group.name attribute") - } - state.Topic = extutil.ToString(request.Config["topic"]) - state.AcceptableLag = extutil.ToInt64(request.Config["acceptableLag"]) - 
state.StateCheckFailed = false - - duration := request.Config["duration"].(float64) - end := time.Now().Add(time.Millisecond * time.Duration(duration)) - - state.ConsumerGroupName = request.Target.Attributes["kafka.consumer-group.name"][0] - state.End = end - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - - return nil, nil -} - -func (m *ConsumerGroupLagCheckAction) Start(_ context.Context, _ *ConsumerGroupLagCheckState) (*action_kit_api.StartResult, error) { - return nil, nil -} - -func (m *ConsumerGroupLagCheckAction) Status(ctx context.Context, state *ConsumerGroupLagCheckState) (*action_kit_api.StatusResult, error) { - return ConsumerGroupLagCheckStatus(ctx, state) -} - -func ConsumerGroupLagCheckStatus(ctx context.Context, state *ConsumerGroupLagCheckState) (*action_kit_api.StatusResult, error) { - now := time.Now() - - client, err := createNewAdminClient(state.BrokerHosts) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - lags, err := client.Lag(ctx, state.ConsumerGroupName) - if err != nil { - return nil, extutil.Ptr(extension_kit.ToError(fmt.Sprintf("Failed to retrieve consumer groups from Kafka for name %s. Full response: %v", state.ConsumerGroupName, err), err)) - } - - var groupLag kadm.DescribedGroupLag - if len(lags.Sorted()) == 0 { - log.Err(err).Msgf("No lags for consumer group with that name %s.", state.ConsumerGroupName) - } else if len(lags.Sorted()) > 1 { - log.Err(err).Msgf("More than 1 lag description for consumer group with that name %s.", state.ConsumerGroupName) - } else { - groupLag = lags.Sorted()[0] - } - - if groupLag.FetchErr != nil { - return nil, extutil.Ptr(extension_kit.ToError(fmt.Sprintf("Error when fetching or describing the consumer group %s: %s", state.ConsumerGroupName, groupLag.FetchErr.Error()), groupLag.FetchErr)) - } - if groupLag.DescribeErr != nil { - return nil, extutil.Ptr(extension_kit.ToError(fmt.Sprintf("Error when fetching or describing the consumer group %s: %s", state.ConsumerGroupName, groupLag.DescribeErr.Error()), groupLag.DescribeErr)) - - } - topicLag := groupLag.Lag.TotalByTopic()[state.Topic].Lag - - completed := now.After(state.End) - var checkError *action_kit_api.ActionKitError - if topicLag < state.AcceptableLag { - state.StateCheckSuccess = true - } else { - state.StateCheckFailed = true - } - if completed && state.StateCheckFailed { - checkError = extutil.Ptr(action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Consumer Group Lag was higher at least once than acceptable threshold '%d'.", - state.AcceptableLag), - Status: extutil.Ptr(action_kit_api.Failed), - }) - } - - metrics := []action_kit_api.Metric{ - *toMetric(topicLag, state, now), - } - - return &action_kit_api.StatusResult{ - Completed: completed, - Error: checkError, - Metrics: extutil.Ptr(metrics), - }, nil -} - -func toMetric(topicLag int64, stateGroupLag *ConsumerGroupLagCheckState, now time.Time) *action_kit_api.Metric { - return extutil.Ptr(action_kit_api.Metric{ - Name: extutil.Ptr("kafka_consumer_group_lag"), - Metric: map[string]string{ - "lag_constraints_fulfilled": strconv.FormatBool(topicLag < stateGroupLag.AcceptableLag), - "consumer": stateGroupLag.ConsumerGroupName, - "topic": stateGroupLag.Topic, - "id": stateGroupLag.ConsumerGroupName + "-" + stateGroupLag.Topic, - }, - Timestamp: now, - Value: float64(topicLag), - }) -} diff --git a/extkafka/check_topic_lag_for_consumer_test.go b/extkafka/check_topic_lag_for_consumer_test.go deleted file mode 100644 
index 38450c8..0000000 --- a/extkafka/check_topic_lag_for_consumer_test.go +++ /dev/null @@ -1,224 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/google/uuid" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/extension-kafka/config" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/twmb/franz-go/pkg/kfake" - "github.com/twmb/franz-go/pkg/kgo" - "strings" - "testing" - "time" -) - -func TestCheckTopicLag_Describe(t *testing.T) { - //Given - action := ConsumerGroupLagCheckAction{} - - //When - response := action.Describe() - - //Then - assert.Equal(t, "Check the consumer lag for a given topic (lag is calculated by the difference between topic offset and consumer offset)", response.Description) - assert.Equal(t, "Check Topic Lag", response.Label) - assert.Equal(t, kafkaConsumerTargetId, response.TargetSelection.TargetType) - assert.Equal(t, fmt.Sprintf("%s.check-lag", kafkaConsumerTargetId), response.Id) - assert.Equal(t, extutil.Ptr("Kafka"), response.Technology) -} - -func TestCheckConsumerGroupLag_Prepare(t *testing.T) { - tests := []struct { - name string - requestBody action_kit_api.PrepareActionRequestBody - wantedError error - wantedState *ConsumerGroupLagCheckState - }{ - { - name: "Should return config", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.consumer-group.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "duration": 10000, - "topic": "steadybit", - "acceptableLag": "1", - }, - ExecutionId: uuid.New(), - }), - wantedState: &ConsumerGroupLagCheckState{ - ConsumerGroupName: "steadybit", - StateCheckSuccess: true, - Topic: "steadybit", - AcceptableLag: 1, - }, - }, - { - name: "Should return error for consumer group name", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{}, - }, - Config: map[string]interface{}{ - "duration": 10000, - "topic": "steadybit", - "acceptableLag": "1", - }, - ExecutionId: uuid.New(), - }), - wantedError: extension_kit.ToError("the target is missing the kafka.consumer-group.name attribute", nil), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - action := ConsumerGroupLagCheckAction{} - state := ConsumerGroupLagCheckState{} - request := tt.requestBody - - //When - _, err := action.Prepare(t.Context(), &state, request) - - //Then - if tt.wantedError != nil { - assert.EqualError(t, err, tt.wantedError.Error()) - } - if tt.wantedState != nil { - assert.NoError(t, err) - assert.Equal(t, tt.wantedState.AcceptableLag, state.AcceptableLag) - assert.Equal(t, tt.wantedState.ConsumerGroupName, state.ConsumerGroupName) - assert.Equal(t, tt.wantedState.Topic, state.Topic) - assert.False(t, state.StateCheckSuccess) - assert.NotNil(t, state.End) - } - }) - } -} - -func TestCheckConsumerGroupLag_Status(t *testing.T) { - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(3), - ) - require.NoError(t, err) - defer c.Close() - - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - // One client can both produce and consume! - // Consuming can either be direct (no consumer group), or through a group.
Below, we use a group. - cl, err := kgo.NewClient( - kgo.SeedBrokers(seeds...), - kgo.ConsumerGroup("steadybit"), - kgo.DefaultProduceTopic("steadybit"), - kgo.ConsumeTopics("steadybit"), - ) - require.NoError(t, err) - defer cl.Close() - - // produce messages for lags - for i := 0; i < 10; i++ { - cl.ProduceSync(context.TODO(), &kgo.Record{Key: []byte("steadybit"), Value: []byte("test")}) - } - - tests := []struct { - name string - requestBody action_kit_api.PrepareActionRequestBody - wantedError error - wantedState *ConsumerGroupLagCheckState - }{ - { - name: "Should return status ok", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.consumer-group.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "duration": 5000, - "topic": "steadybit", - "acceptableLag": "15", - }, - ExecutionId: uuid.New(), - }), - wantedState: &ConsumerGroupLagCheckState{ - ConsumerGroupName: "steadybit", - AcceptableLag: int64(15), - StateCheckSuccess: true, - Topic: "steadybit", - }, - }, - { - name: "Should return status ko", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.consumer-group.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "duration": 5000, - "topic": "steadybit", - "acceptableLag": "1", - }, - ExecutionId: uuid.New(), - }), - wantedState: &ConsumerGroupLagCheckState{ - ConsumerGroupName: "steadybit", - AcceptableLag: int64(1), - StateCheckSuccess: true, - Topic: "steadybit", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - action := ConsumerGroupLagCheckAction{} - state := ConsumerGroupLagCheckState{} - request := tt.requestBody - - //When - _, errPrepare := action.Prepare(t.Context(), &state, request) - statusResult, errStatus := action.Status(t.Context(), &state) - - //Then - if tt.wantedState != nil { - assert.NoError(t, errPrepare) - assert.NoError(t, errStatus) - assert.Equal(t, tt.wantedState.AcceptableLag, state.AcceptableLag) - assert.Equal(t, tt.wantedState.Topic, state.Topic) - assert.Equal(t, tt.wantedState.ConsumerGroupName, state.ConsumerGroupName) - assert.False(t, statusResult.Completed) - assert.NotNil(t, state.End) - } - - time.Sleep(6 * time.Second) - - // Completed - _, errStatus = action.Status(t.Context(), &state) - - //Then - if tt.wantedState != nil { - assert.NoError(t, errPrepare) - assert.NoError(t, errStatus) - assert.Equal(t, tt.wantedState.AcceptableLag, state.AcceptableLag) - assert.Equal(t, tt.wantedState.Topic, state.Topic) - assert.Equal(t, tt.wantedState.ConsumerGroupName, state.ConsumerGroupName) - assert.NotNil(t, state.End) - } - }) - } -} diff --git a/extkafka/common.go b/extkafka/common.go deleted file mode 100644 index 60f4f4e..0000000 --- a/extkafka/common.go +++ /dev/null @@ -1,328 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "github.com/google/uuid" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kadm" - "github.com/twmb/franz-go/pkg/kgo" - "github.com/twmb/franz-go/pkg/sasl/plain" - "github.com/twmb/franz-go/pkg/sasl/scram" - "net" - "os" - "strconv" - "time" -) - -const 
( - kafkaBrokerTargetId = "com.steadybit.extension_kafka.broker" - kafkaConsumerTargetId = "com.steadybit.extension_kafka.consumer" - kafkaTopicTargetId = "com.steadybit.extension_kafka.topic" -) - -const ( - kafkaIcon = "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZlcnNpb249IjEuMSIgdmlld0JveD0iMCAwIDI0IDI0Ij4KICA8cGF0aAogICAgZD0iTTE1LjksMTMuMmMtLjksMC0xLjYuNC0yLjIsMWwtMS4zLTFjLjEtLjQuMi0uOC4yLTEuM3MwLS45LS4yLTEuMmwxLjMtLjljLjUuNiwxLjMsMSwyLjEsMSwxLjYsMCwyLjktMS4zLDIuOS0yLjlzLTEuMy0yLjktMi45LTIuOS0yLjksMS4zLTIuOSwyLjksMCwuNi4xLjhsLTEuMy45Yy0uNi0uNy0xLjQtMS4yLTIuMy0xLjN2LTEuNmMxLjMtLjMsMi4zLTEuNCwyLjMtMi44LDAtMS42LTEuMy0yLjktMi45LTIuOXMtMi45LDEuMy0yLjksMi45LDEsMi41LDIuMiwyLjh2MS42Yy0xLjcuMy0zLjEsMS44LTMuMSwzLjZzMS4zLDMuNCwzLjEsMy42djEuN2MtMS4zLjMtMi4zLDEuNC0yLjMsMi44czEuMywyLjksMi45LDIuOSwyLjktMS4zLDIuOS0yLjktMS0yLjUtMi4zLTIuOHYtMS43Yy45LS4xLDEuNy0uNiwyLjMtMS4zbDEuNCwxYzAsLjMtLjEuNS0uMS44LDAsMS42LDEuMywyLjksMi45LDIuOXMyLjktMS4zLDIuOS0yLjktMS4zLTIuOS0yLjktMi45aDBaTTE1LjksNi41Yy44LDAsMS40LjYsMS40LDEuNHMtLjYsMS40LTEuNCwxLjQtMS40LS42LTEuNC0xLjQuNi0xLjQsMS40LTEuNGgwWk03LjUsMy45YzAtLjguNi0xLjQsMS40LTEuNHMxLjQuNiwxLjQsMS40LS42LDEuNC0xLjQsMS40LTEuNC0uNi0xLjQtMS40aDBaTTEwLjMsMjAuMWMwLC44LS42LDEuNC0xLjQsMS40cy0xLjQtLjYtMS40LTEuNC42LTEuNCwxLjQtMS40LDEuNC42LDEuNCwxLjRaTTguOSwxMy45Yy0xLjEsMC0xLjktLjktMS45LTEuOXMuOS0xLjksMS45LTEuOSwxLjkuOSwxLjksMS45LS45LDEuOS0xLjksMS45Wk0xNS45LDE3LjRjLS44LDAtMS40LS42LTEuNC0xLjRzLjYtMS40LDEuNC0xLjQsMS40LjYsMS40LDEuNC0uNiwxLjQtMS40LDEuNFoiCiAgICBmaWxsPSJjdXJyZW50Q29sb3IiIC8+Cjwvc3ZnPg==" - stateCheckModeAtLeastOnce = "atLeastOnce" - stateCheckModeAllTheTime = "allTheTime" -) - -type KafkaBrokerAttackState struct { - Topic string - Partition int32 - Offset int64 - DelayBetweenRequestsInMS int64 - SuccessRate int - Timeout time.Time - MaxConcurrent int - RecordKey string - RecordValue string - RecordPartition int - NumberOfRecords uint64 - ExecutionID uuid.UUID - RecordHeaders map[string]string - ConsumerGroup string - BrokerHosts []string -} - -type AlterState struct { - BrokerHosts []string - BrokerID int32 - InitialBrokerConfigValue int - TargetBrokerConfigValue int -} - -var ( - topic = action_kit_api.ActionParameter{ - Name: "topic", - Label: "Topic", - Description: extutil.Ptr("The Topic to send records to"), - Type: action_kit_api.ActionParameterTypeString, - Required: extutil.Ptr(true), - } - recordKey = action_kit_api.ActionParameter{ - Name: "recordKey", - Label: "Record key", - Description: extutil.Ptr("The Record Key. If none is set, the partition will be choose with round-robin algorithm."), - Type: action_kit_api.ActionParameterTypeString, - } - recordValue = action_kit_api.ActionParameter{ - Name: "recordValue", - Label: "Record value", - Description: extutil.Ptr("The Record Value."), - Type: action_kit_api.ActionParameterTypeString, - Required: extutil.Ptr(true), - } - recordHeaders = action_kit_api.ActionParameter{ - Name: "recordHeaders", - Label: "Record Headers", - Description: extutil.Ptr("The Record Headers."), - Type: action_kit_api.ActionParameterTypeKeyValue, - } - durationAlter = action_kit_api.ActionParameter{ - Label: "Duration", - Description: extutil.Ptr("The duration of the action. 
The broker configuration will be reverted at the end of the action."), - Name: "duration", - Type: action_kit_api.ActionParameterTypeDuration, - DefaultValue: extutil.Ptr("60s"), - Required: extutil.Ptr(true), - } - duration = action_kit_api.ActionParameter{ - Name: "duration", - Label: "Duration", - Description: extutil.Ptr("In which timeframe should the specified records be produced?"), - Type: action_kit_api.ActionParameterTypeDuration, - DefaultValue: extutil.Ptr("10s"), - Required: extutil.Ptr(true), - } - successRate = action_kit_api.ActionParameter{ - Name: "successRate", - Label: "Required Success Rate", - Description: extutil.Ptr("How many percent of the records must at least be successful (in terms of the following response verifications) to continue the experiment execution? The result will be evaluated at the end of the given duration."), - Type: action_kit_api.ActionParameterTypePercentage, - DefaultValue: extutil.Ptr("100"), - Required: extutil.Ptr(true), - MinValue: extutil.Ptr(0), - MaxValue: extutil.Ptr(100), - } - maxConcurrent = action_kit_api.ActionParameter{ - Name: "maxConcurrent", - Label: "Max concurrent requests", - Description: extutil.Ptr("Maximum number of parallel produce requests. (min 1, max 10)"), - Type: action_kit_api.ActionParameterTypeInteger, - DefaultValue: extutil.Ptr("5"), - MinValue: extutil.Ptr(1), - MaxValue: extutil.Ptr(10), - Required: extutil.Ptr(true), - Advanced: extutil.Ptr(true), - } -) - -func createNewClient(brokers []string) (*kgo.Client, error) { - opts := []kgo.Opt{ - kgo.SeedBrokers(brokers...), - kgo.ClientID("steadybit"), - } - - if config.Config.SaslMechanism != "" { - switch saslMechanism := config.Config.SaslMechanism; saslMechanism { - case kadm.ScramSha256.String(): - opts = append(opts, []kgo.Opt{ - kgo.SASL(scram.Auth{ - User: config.Config.SaslUser, - Pass: config.Config.SaslPassword, - }.AsSha256Mechanism()), - }...) - case kadm.ScramSha512.String(): - opts = append(opts, []kgo.Opt{ - kgo.SASL(scram.Auth{ - User: config.Config.SaslUser, - Pass: config.Config.SaslPassword, - }.AsSha512Mechanism()), - }...) - default: - opts = append(opts, []kgo.Opt{ - kgo.SASL(plain.Auth{ - User: config.Config.SaslUser, - Pass: config.Config.SaslPassword, - }.AsMechanism()), - }...) - } - } - - if config.Config.KafkaClusterCaFile != "" && config.Config.KafkaClusterCertKeyFile != "" && config.Config.KafkaClusterCertChainFile != "" { - tlsConfig, err := newTLSConfig(config.Config.KafkaClusterCertChainFile, config.Config.KafkaClusterCertKeyFile, config.Config.KafkaClusterCaFile) - if err != nil { - return nil, err - } - - opts = append(opts, kgo.DialTLSConfig(tlsConfig)) - } else if config.Config.KafkaConnectionUseTLS == "true" { - tlsDialer := &tls.Dialer{NetDialer: &net.Dialer{Timeout: 10 * time.Second}} - opts = append(opts, kgo.Dialer(tlsDialer.DialContext)) - } - - client, err := kgo.NewClient(opts...)
- if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - log.Debug().Msgf("Initiating client with: %v", opts) - - return client, nil -} - -func createNewAdminClient(brokers []string) (*kadm.Client, error) { - client, err := createNewClient(brokers) - if err != nil { - return nil, err - } - return kadm.NewClient(client), nil -} - -func describeConfigInt(ctx context.Context, brokers []string, configName string, brokerID int32) (int, error) { - value, err := describeConfigStr(ctx, brokers, configName, brokerID) - if err != nil { - return -1, err - } - return strconv.Atoi(value) -} - -func describeConfigStr(ctx context.Context, brokers []string, configName string, brokerID int32) (string, error) { - adminClient, err := createNewAdminClient(brokers) - if err != nil { - return "", err - } - return describeConfigOf(ctx, adminClient, configName, brokerID) -} - -func describeConfigOf(ctx context.Context, adminClient *kadm.Client, configName string, brokerID int32) (configValue string, err error) { - configs, err := adminClient.DescribeBrokerConfigs(ctx, brokerID) - if err != nil { - return "", err - } - - _, err = configs.On(strconv.FormatInt(int64(brokerID), 10), func(resourceConfig *kadm.ResourceConfig) error { - for i := range resourceConfig.Configs { - if resourceConfig.Configs[i].Key == configName { - configValue = resourceConfig.Configs[i].MaybeValue() - return nil - } - } - - var values []string - for _, c := range resourceConfig.Configs { - values = append(values, fmt.Sprintf("%s: %s", c.Key, *c.Value)) - } - log.Debug().Strs("configs", values).Msgf("Configuration property %s could not be found", configName) - - return nil - }) - if err != nil { - return "", err - } - - if configValue == "" { - log.Warn().Msgf("No value found for configuration key: %s, for broker node-id: %d", configName, brokerID) - } else { - log.Debug().Msgf("Configuration value for key %s: %s, for broker node-id: %d", configName, configValue, brokerID) - } - - return configValue, nil -} - -func alterConfigInt(ctx context.Context, brokers []string, configName string, configValue int, brokerID int32) error { - return alterConfigStr(ctx, brokers, configName, strconv.Itoa(configValue), brokerID) -} - -func alterConfigStr(ctx context.Context, brokers []string, configName string, configValue string, brokerID int32) error { - adminClient, err := createNewAdminClient(brokers) - if err != nil { - return err - } - defer adminClient.Close() - - op := kadm.SetConfig - if configValue == "" { - op = kadm.DeleteConfig - } - responses, err := adminClient.AlterBrokerConfigs(ctx, []kadm.AlterConfig{{Name: configName, Value: extutil.Ptr(configValue), Op: op}}, brokerID) - if err != nil { - return err - } - var errs []error - for _, response := range responses { - if response.Err != nil { - detailedError := fmt.Errorf("%w Response from Broker: %s", response.Err, response.ErrMessage) - errs = append(errs, detailedError) - } - } - if len(errs) > 0 { - return errors.Join(errs...) 
- } - - // Changes may take time to be applied, wait accordingly - now := time.Now() - for { - if time.Since(now).Seconds() > 5 { - return fmt.Errorf("configuration change of %s to %s was not applied in time", configName, configValue) - } - value, err := describeConfigOf(ctx, adminClient, configName, brokerID) - if err != nil { - return err - } - if value == configValue { - return nil - } - log.Debug().Msgf("Configuration change of %s to %s was not applied yet, waiting", configName, configValue) - } -} - -func adjustThreads(ctx context.Context, hosts []string, configName string, targetValue int, brokerId int32) error { - currentValue, err := describeConfigInt(ctx, hosts, configName, brokerId) - if err != nil { - return err - } - - // As kafka does not allow to more than double or halve the number of threads, we use an iterative approach to get to that value - for currentValue != targetValue { - nextValue := max(min(targetValue, currentValue*2), currentValue/2) - - if err := alterConfigInt(ctx, hosts, configName, nextValue, brokerId); err != nil { - return err - } else { - currentValue = nextValue - } - } - return nil -} - -func newTLSConfig(clientCertFile, clientKeyFile, caCertFile string) (*tls.Config, error) { - tlsConfig := tls.Config{} - - // Load client cert - cert, err := tls.LoadX509KeyPair(clientCertFile, clientKeyFile) - if err != nil { - return &tlsConfig, err - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // Load CA cert - caCert, err := os.ReadFile(caCertFile) - if err != nil { - return &tlsConfig, err - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = caCertPool - - return &tlsConfig, err -} diff --git a/extkafka/consumergroup_discovery.go b/extkafka/consumergroup_discovery.go deleted file mode 100644 index dcd8142..0000000 --- a/extkafka/consumergroup_discovery.go +++ /dev/null @@ -1,150 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2024 Steadybit GmbH - -package extkafka - -import ( - "context" - "errors" - "fmt" - "github.com/steadybit/discovery-kit/go/discovery_kit_api" - "github.com/steadybit/discovery-kit/go/discovery_kit_commons" - "github.com/steadybit/discovery-kit/go/discovery_kit_sdk" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kadm" - "strings" - "time" -) - -type kafkaConsumerGroupDiscovery struct { -} - -var ( - _ discovery_kit_sdk.TargetDescriber = (*kafkaConsumerGroupDiscovery)(nil) - _ discovery_kit_sdk.AttributeDescriber = (*kafkaConsumerGroupDiscovery)(nil) -) - -func NewKafkaConsumerGroupDiscovery(ctx context.Context) discovery_kit_sdk.TargetDiscovery { - discovery := &kafkaConsumerGroupDiscovery{} - return discovery_kit_sdk.NewCachedTargetDiscovery(discovery, - discovery_kit_sdk.WithRefreshTargetsNow(), - discovery_kit_sdk.WithRefreshTargetsInterval(ctx, time.Duration(config.Config.DiscoveryIntervalConsumerGroup)*time.Second), - ) -} - -func (r *kafkaConsumerGroupDiscovery) Describe() discovery_kit_api.DiscoveryDescription { - return discovery_kit_api.DiscoveryDescription{ - Id: kafkaConsumerTargetId, - Discover: discovery_kit_api.DescribingEndpointReferenceWithCallInterval{ - CallInterval: extutil.Ptr(fmt.Sprintf("%ds", config.Config.DiscoveryIntervalConsumerGroup)), - }, - } -} - -func (r *kafkaConsumerGroupDiscovery) DescribeTarget() discovery_kit_api.TargetDescription { - return discovery_kit_api.TargetDescription{ - Id: 
kafkaConsumerTargetId, - Label: discovery_kit_api.PluralLabel{One: "Kafka consumer group", Other: "Kafka consumer groups"}, - Category: extutil.Ptr("kafka"), - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - Table: discovery_kit_api.Table{ - Columns: []discovery_kit_api.Column{ - {Attribute: "steadybit.label"}, - {Attribute: "kafka.consumer-group.coordinator"}, - {Attribute: "kafka.consumer-group.protocol-type"}, - }, - OrderBy: []discovery_kit_api.OrderBy{ - { - Attribute: "steadybit.label", - Direction: "ASC", - }, - }, - }, - } -} - -func (r *kafkaConsumerGroupDiscovery) DescribeAttributes() []discovery_kit_api.AttributeDescription { - return []discovery_kit_api.AttributeDescription{ - { - Attribute: "kafka.consumer-group.name", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka consumer group name", - Other: "Kafka consumer group names", - }, - }, - { - Attribute: "kafka.consumer-group.coordinator", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka consumer group coordinator", - Other: "Kafka consumer group coordinators", - }, - }, { - Attribute: "kafka.consumer-group.protocol-type", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka consumer group protocol type", - Other: "Kafka consumer group protocol types", - }, - }, - { - Attribute: "kafka.consumer-group.topics", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka consumer group topic", - Other: "Kafka consumer group topics", - }, - }, - } -} - -func (r *kafkaConsumerGroupDiscovery) DiscoverTargets(ctx context.Context) ([]discovery_kit_api.Target, error) { - return getAllConsumerGroups(ctx) -} - -func getAllConsumerGroups(ctx context.Context) ([]discovery_kit_api.Target, error) { - result := make([]discovery_kit_api.Target, 0, 20) - - client, err := createNewAdminClient(strings.Split(config.Config.SeedBrokers, ",")) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - var seList *kadm.ShardErrors - describedGroups, err := client.DescribeGroups(ctx) - switch { - case err == nil: - case errors.As(err, &seList): - default: - return nil, fmt.Errorf("failed to describe consumer groups: %v", err) - } - metadata, err := client.BrokerMetadata(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get brokers metadata : %v", err) - } - for _, group := range describedGroups.Sorted() { - result = append(result, toConsumerGroupTarget(group, metadata.Cluster)) - } - - return discovery_kit_commons.ApplyAttributeExcludes(result, config.Config.DiscoveryAttributesExcludesConsumerGroups), nil -} - -func toConsumerGroupTarget(group kadm.DescribedGroup, clusterName string) discovery_kit_api.Target { - id := fmt.Sprintf("%v-%s", group.Group, clusterName) - label := fmt.Sprintf("%v", group.Group) - - attributes := make(map[string][]string) - attributes["kafka.cluster.name"] = []string{clusterName} - attributes["kafka.consumer-group.name"] = []string{fmt.Sprintf("%v", group.Group)} - attributes["kafka.consumer-group.coordinator"] = []string{fmt.Sprintf("%v", group.Coordinator.Host)} - attributes["kafka.consumer-group.protocol-type"] = []string{group.ProtocolType} - attributes["kafka.consumer-group.topics"] = group.AssignedPartitions().Topics() - - return discovery_kit_api.Target{ - Id: id, - Label: label, - TargetType: kafkaConsumerTargetId, - Attributes: attributes, - } -} diff --git a/extkafka/consumergroup_discovery_test.go b/extkafka/consumergroup_discovery_test.go deleted file mode 100644 index a3a0a9b..0000000 --- 
a/extkafka/consumergroup_discovery_test.go +++ /dev/null @@ -1,148 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "fmt" - "github.com/steadybit/discovery-kit/go/discovery_kit_api" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "testing" - - "github.com/twmb/franz-go/pkg/kadm" - - "context" - "github.com/steadybit/extension-kafka/config" - "github.com/twmb/franz-go/pkg/kfake" - "strings" -) - -func TestDescribe(t *testing.T) { - desc := (&kafkaTopicDiscovery{}).Describe() - assert.Equal(t, kafkaTopicTargetId, desc.Id) - assert.NotNil(t, desc.Discover.CallInterval) -} - -func TestDescribeTarget(t *testing.T) { - d := &kafkaTopicDiscovery{} - td := d.DescribeTarget() - assert.Equal(t, kafkaTopicTargetId, td.Id) - assert.Equal(t, "Kafka topic", td.Label.One) - assert.Equal(t, "Kafka topics", td.Label.Other) - assert.Equal(t, "kafka", *td.Category) - assert.Len(t, td.Table.Columns, 6) - require.Len(t, td.Table.OrderBy, 1) - assert.Equal(t, "steadybit.label", td.Table.OrderBy[0].Attribute) - assert.Equal(t, discovery_kit_api.OrderByDirection("ASC"), td.Table.OrderBy[0].Direction) -} - -func TestDescribeAttributes(t *testing.T) { - d := &kafkaTopicDiscovery{} - attrs := d.DescribeAttributes() - expected := []string{ - "kafka.topic.name", - "kafka.topic.partitions", - "kafka.topic.partitions-leaders", - "kafka.topic.partitions-replicas", - "kafka.topic.partitions-isr", - "kafka.topic.replication-factor", - } - require.Len(t, attrs, len(expected)) - for _, want := range expected { - found := false - for _, a := range attrs { - if a.Attribute == want { - found = true - break - } - } - assert.Truef(t, found, "DescribeAttributes() missing %q", want) - } -} - -func TestToTopicTarget(t *testing.T) { - td := kadm.TopicDetail{ - Topic: "my-topic", - Partitions: kadm.PartitionDetails{ - 1: {Partition: 1, Leader: 101, Replicas: []int32{101, 102}, ISR: []int32{101}}, - 0: {Partition: 0, Leader: 100, Replicas: []int32{100, 102}, ISR: []int32{100, 102}}, - }, - } - cluster := "cluster-42" - tgt := toTopicTarget(td, cluster) - - // Basic fields - assert.Equal(t, "my-topic-cluster-42", tgt.Id) - assert.Equal(t, "my-topic", tgt.Label) - assert.Equal(t, kafkaTopicTargetId, tgt.TargetType) - - // Attributes - attr := tgt.Attributes - check := func(key string, want []string) { - v, ok := attr[key] - assert.True(t, ok, "missing attribute %q", key) - assert.Equal(t, want, v) - } - - check("kafka.cluster.name", []string{cluster}) - check("kafka.topic.name", []string{"my-topic"}) - check("kafka.topic.partitions", []string{"0", "1"}) - check("kafka.topic.partitions-leaders", []string{"0->leader=100", "1->leader=101"}) - check( - "kafka.topic.partitions-replicas", - []string{ - fmt.Sprintf("0->replicas=%v", []int{100, 102}), - fmt.Sprintf("1->replicas=%v", []int{101, 102}), - }, - ) - check( - "kafka.topic.partitions-isr", - []string{ - fmt.Sprintf("0->in-sync-replicas=%v", []int{100, 102}), - fmt.Sprintf("1->in-sync-replicas=%v", []int{101}), - }, - ) - check("kafka.topic.replication-factor", []string{"2"}) -} - -// TestDiscoverTargetsClusterName verifies that the kafka.cluster.name attribute -// is correctly set when discovering topics against a fake Kafka cluster. 
-func TestDiscoverTargetsClusterName(t *testing.T) { - // Set up fake Kafka cluster with a topic "steadybit" - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(1), - kfake.ClusterID("test"), - ) - require.NoError(t, err) - defer c.Close() - - // Configure seed brokers for discovery - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - - // Ensure no excluded attributes - config.Config.DiscoveryAttributesExcludesTopics = nil - - // Discover targets - ctx := context.Background() - targets, err := getAllTopics(ctx) - require.NoError(t, err) - require.NotEmpty(t, targets) - - // Retrieve expected cluster name from metadata - client, err := createNewAdminClient(strings.Split(config.Config.SeedBrokers, ",")) - require.NoError(t, err) - defer client.Close() - meta, err := client.BrokerMetadata(ctx) - require.NoError(t, err) - expected := meta.Cluster - - // Assert each discovered target has the correct cluster name attribute - for _, tgt := range targets { - values, ok := tgt.Attributes["kafka.cluster.name"] - require.True(t, ok, "missing kafka.cluster.name for target %s", tgt.Id) - require.Equal(t, []string{expected}, values) - } -} diff --git a/extkafka/delete_last_records.go b/extkafka/delete_last_records.go deleted file mode 100644 index a469c9f..0000000 --- a/extkafka/delete_last_records.go +++ /dev/null @@ -1,155 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2023 Steadybit GmbH - -package extkafka - -import ( - "context" - "errors" - "fmt" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kadm" - "strconv" - "strings" -) - -type DeleteRecordsAttack struct{} - -type DeleteRecordsState struct { - TopicName string - Partitions []string - Offset int64 - BrokerHosts []string -} - -var _ action_kit_sdk.Action[DeleteRecordsState] = (*DeleteRecordsAttack)(nil) - -func NewDeleteRecordsAttack() action_kit_sdk.Action[DeleteRecordsState] { - return &DeleteRecordsAttack{} -} - -func (k *DeleteRecordsAttack) NewEmptyState() DeleteRecordsState { - return DeleteRecordsState{} -} - -func (k *DeleteRecordsAttack) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.delete-records", kafkaTopicTargetId), - Label: "Trigger Delete Records", - Description: "Trigger delete records to move the offset relative to the last known offset for each selected partition", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaTopicTargetId, - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "topic name", - Description: extutil.Ptr("Find topic by name"), - Query: "kafka.topic.name=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - TimeControl: action_kit_api.TimeControlInstantaneous, - Kind: action_kit_api.Attack, - Parameters: []action_kit_api.ActionParameter{ - { - Name: "partitions", - Label: "Partition to issue delete records requests", - Description: extutil.Ptr("One or more partitions to delete the records"), - Type: action_kit_api.ActionParameterTypeStringArray, - Required: extutil.Ptr(true), - 
Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ParameterOptionsFromTargetAttribute{ - Attribute: "kafka.topic.partitions", - }, - }), - }, - { - Label: "X from newest Offset", - Description: extutil.Ptr("To move the offset into the past; 0 means the last known offset (skipping every record from where the consumer was)."), - Name: "offset", - Type: action_kit_api.ActionParameterTypeInteger, - DefaultValue: extutil.Ptr("0"), - Required: extutil.Ptr(true), - }, - }, - } -} - -func (k *DeleteRecordsAttack) Prepare(_ context.Context, state *DeleteRecordsState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - state.TopicName = extutil.MustHaveValue(request.Target.Attributes, "kafka.topic.name")[0] - state.Partitions = extutil.ToStringArray(request.Config["partitions"]) - state.Offset = extutil.ToInt64(request.Config["offset"]) - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - - return nil, nil -} - -func (k *DeleteRecordsAttack) Start(ctx context.Context, state *DeleteRecordsState) (*action_kit_api.StartResult, error) { - var errs []error - - adminClient, err := createNewAdminClient(state.BrokerHosts) - if err != nil { - return nil, err - } - defer adminClient.Close() - - // Get the current end offsets - endOffsets, err := adminClient.ListEndOffsets(ctx, state.TopicName) - if err != nil { - return nil, err - } - if endOffsets.Error() != nil { - return nil, endOffsets.Error() - } - - var logMessages []string - // For each partition, fetch the offset - for _, partition := range state.Partitions { - var partitionInt int64 - partitionInt, err = strconv.ParseInt(partition, 10, 64) - if err != nil { - return nil, extension_kit.ToError(fmt.Sprintf("Failed to convert partition %s to int32", partition), err) - } - - endOffset, found := endOffsets.Lookup(state.TopicName, int32(partitionInt)) - if !found { - return nil, extension_kit.ToError(fmt.Sprintf("Failed to find offset for topic %s and partition %s", state.TopicName, partition), nil) - } - - newOffsets := kadm.Offsets{} - newOffset := endOffset.Offset - state.Offset - newOffsets.Add(kadm.Offset{Topic: endOffset.Topic, Partition: endOffset.Partition, LeaderEpoch: endOffset.LeaderEpoch, At: newOffset}) - - var offsetResponses kadm.DeleteRecordsResponses - offsetResponses, err = adminClient.DeleteRecords(ctx, newOffsets) - if err != nil { - return nil, err - } - - for _, newOffsetResponse := range offsetResponses.Sorted() { - if newOffsetResponse.Err != nil { - errs = append(errs, newOffsetResponse.Err) - } - } - if len(errs) > 0 { - return nil, errors.Join(errs...)
- } - logMessages = append(logMessages, fmt.Sprintf("Trigger delete records for topic %s for partition %v, moving offset at %d", state.TopicName, partition, newOffset)) - } - - return &action_kit_api.StartResult{ - Messages: &[]action_kit_api.Message{{ - Level: extutil.Ptr(action_kit_api.Info), - Message: strings.Join(logMessages, "\n"), - }}, - }, nil - -} diff --git a/extkafka/partition_attack_elect_new_leader.go b/extkafka/partition_attack_elect_new_leader.go deleted file mode 100644 index ac138fc..0000000 --- a/extkafka/partition_attack_elect_new_leader.go +++ /dev/null @@ -1,168 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2023 Steadybit GmbH - -package extkafka - -import ( - "context" - "errors" - "fmt" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kafka/config" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kadm" - "slices" - "strings" -) - -type kafkaBrokerElectNewLeaderAttack struct { -} - -var _ action_kit_sdk.Action[KafkaBrokerAttackState] = (*kafkaBrokerElectNewLeaderAttack)(nil) - -func NewKafkaBrokerElectNewLeaderAttack() action_kit_sdk.Action[KafkaBrokerAttackState] { - return kafkaBrokerElectNewLeaderAttack{} -} - -func (f kafkaBrokerElectNewLeaderAttack) NewEmptyState() KafkaBrokerAttackState { - return KafkaBrokerAttackState{} -} - -func (f kafkaBrokerElectNewLeaderAttack) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.elect-new-leader", kafkaTopicTargetId), - Label: "Elect New Partition Leader", - Description: "Elect a new leader for a given topic and partition, only by trying to elect a new preferred replica. 
The current leader of the partition will be placed at the end of the replica list through a reassignment", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaTopicTargetId, - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "topic id", - Description: extutil.Ptr("Find topic by id"), - Query: "kafka.topic.id=\"\"", - }, - }), - }), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - TimeControl: action_kit_api.TimeControlInstantaneous, - Kind: action_kit_api.Attack, - Parameters: []action_kit_api.ActionParameter{ - { - Name: "partitions", - Label: "Partition to elect a new leader (preferred replica)", - Description: extutil.Ptr("The partition to elect a new leader for"), - Type: action_kit_api.ActionParameterTypeString, - Required: extutil.Ptr(true), - Options: extutil.Ptr([]action_kit_api.ParameterOption{ - action_kit_api.ParameterOptionsFromTargetAttribute{ - Attribute: "kafka.topic.partitions", - }, - }), - }, - }, - } -} - -func (f kafkaBrokerElectNewLeaderAttack) Prepare(_ context.Context, state *KafkaBrokerAttackState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - state.Topic = extutil.MustHaveValue(request.Target.Attributes, "kafka.topic.name")[0] - state.Partition = extutil.ToInt32(request.Config["partitions"]) - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - - return nil, nil -} - -func (f kafkaBrokerElectNewLeaderAttack) Start(ctx context.Context, state *KafkaBrokerAttackState) (*action_kit_api.StartResult, error) { - messages := make([]action_kit_api.Message, 0) - client, err := createNewAdminClient(state.BrokerHosts) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - // Find the corresponding leader info - topics, err := client.ListTopics(ctx, state.Topic) - if err != nil { - return nil, extutil.Ptr(extension_kit.ToError(fmt.Sprintf("Failed to retrieve topics from Kafka for name %s. Full response: %v", state.Topic, err), err)) - } - - var topicDetail kadm.TopicDetail - if len(topics.Sorted()) == 0 { - // No error occurred here, so log a warning instead of an (empty) error - log.Warn().Msgf("No topic found with the name %s.", state.Topic) - } else if len(topics.Sorted()) > 1 { - log.Warn().Msgf("More than one topic found with the name %s.", state.Topic) - } else { - topicDetail = topics.Sorted()[0] - } - partition := topicDetail.Partitions[state.Partition] - - assignment := kadm.AlterPartitionAssignmentsReq{} - assignment.Assign(state.Topic, state.Partition, relegateLeader(partition.Replicas, partition.ISR, partition.Leader)) - // Move the current leader to the end of the replica preference list, to give another broker a chance to become leader - _, err = client.AlterPartitionAssignments(ctx, assignment) - if err != nil { - return nil, err - } - - topicSet := make(kadm.TopicsSet) - topicSet.Add(state.Topic, []int32{state.Partition}...)
- - results, err := client.ElectLeaders(ctx, kadm.ElectPreferredReplica, topicSet) - if err != nil { - return nil, fmt.Errorf("failed to elect new leader for topic %s and partition %d: %s", state.Topic, state.Partition, err) - } - var errorElectLeader action_kit_api.ActionKitError - var errs []error - for t, parts := range results { - for partition, result := range parts { - if result.Err != nil { - messages = append(messages, action_kit_api.Message{ - Level: extutil.Ptr(action_kit_api.Warn), - Message: fmt.Sprintf("Error while electing leader for topic '%s', partition %d, error is: %s", t, partition, result.Err.Error()), - }) - errs = append(errs, fmt.Errorf("error while electing leader for topic '%s', partition %d, error is: %s", t, partition, result.Err.Error())) - } else { - messages = append(messages, action_kit_api.Message{ - Level: extutil.Ptr(action_kit_api.Info), - Message: fmt.Sprintf("Successfully elected leader for topic '%s', partition %d.", t, partition), - }) - } - } - } - if len(errs) > 0 { - errorElectLeader = action_kit_api.ActionKitError{Title: "Election failed for partition(s)", Detail: extutil.Ptr(errors.Join(errs...).Error())} - return &action_kit_api.StartResult{ - Messages: &messages, - Error: &errorElectLeader, - }, nil - } - - return &action_kit_api.StartResult{ - Messages: &messages, - }, nil - -} - -func relegateLeader(replicas []int32, replicaInSync []int32, leader int32) []int32 { - var brokers []int32 - // Add first the next in sync replicas - for _, ns := range replicas { - if ns != leader && slices.Contains(replicaInSync, ns) { - brokers = append(brokers, ns) - } - } - // Then add not in-sync replicas - for _, ns := range replicas { - if !slices.Contains(replicaInSync, ns) && ns != leader { - brokers = append(brokers, ns) - } - } - // and add last the leader - return append(brokers, leader) -} diff --git a/extkafka/produce.go b/extkafka/produce.go deleted file mode 100644 index bc358b5..0000000 --- a/extkafka/produce.go +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/google/uuid" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kgo" - "strings" - "sync" - "sync/atomic" - "time" -) - -type ExecutionRunData struct { - stopTicker chan bool // stores the stop channels for each execution - jobs chan time.Time // stores the jobs for each execution - tickers *time.Ticker // stores the tickers for each execution, to be able to stop them - metrics chan action_kit_api.Metric // stores the metrics for each execution - requestCounter atomic.Uint64 // stores the number of requests for each execution - requestSuccessCounter atomic.Uint64 // stores the number of successful requests for each execution -} - -var ( - ExecutionRunDataMap = sync.Map{} //make(map[uuid.UUID]*ExecutionRunData) -) - -func prepare(request action_kit_api.PrepareActionRequestBody, state *KafkaBrokerAttackState, checkEnded func(executionRunData *ExecutionRunData, state *KafkaBrokerAttackState) bool) (*action_kit_api.PrepareResult, error) { - if len(request.Target.Attributes["kafka.topic.name"]) == 0 { - return nil, fmt.Errorf("the target is missing the kafka.topic.name attribute") - } - state.Topic = extutil.MustHaveValue(request.Target.Attributes, "kafka.topic.name")[0] - - duration := 
extutil.ToInt64(request.Config["duration"]) - state.Timeout = time.Now().Add(time.Millisecond * time.Duration(duration)) - state.SuccessRate = extutil.ToInt(request.Config["successRate"]) - state.MaxConcurrent = extutil.ToInt(request.Config["maxConcurrent"]) - - if state.MaxConcurrent == 0 { - return nil, fmt.Errorf("max concurrent can't be zero") - } - state.NumberOfRecords = extutil.ToUInt64(request.Config["numberOfRecords"]) - state.RecordKey = extutil.ToString(request.Config["recordKey"]) - state.RecordValue = extutil.ToString(request.Config["recordValue"]) - state.ExecutionID = request.ExecutionId - state.BrokerHosts = strings.Split(config.Config.SeedBrokers, ",") - - var err error - if _, ok := request.Config["recordHeaders"]; ok { - state.RecordHeaders, err = extutil.ToKeyValue(request.Config, "recordHeaders") - if err != nil { - log.Error().Err(err).Msg("Failed to parse headers") - return nil, err - } - } - - initExecutionRunData(state) - executionRunData, err := loadExecutionRunData(state.ExecutionID) - if err != nil { - log.Error().Err(err).Msg("Failed to load execution run data") - return nil, err - } - - // create worker pool - for w := 1; w <= state.MaxConcurrent; w++ { - go requestProducerWorker(executionRunData, state, checkEnded) - } - return nil, nil -} - -func loadExecutionRunData(executionID uuid.UUID) (*ExecutionRunData, error) { - erd, ok := ExecutionRunDataMap.Load(executionID) - if !ok { - return nil, fmt.Errorf("failed to load execution run data") - } - executionRunData := erd.(*ExecutionRunData) - return executionRunData, nil -} - -func initExecutionRunData(state *KafkaBrokerAttackState) { - saveExecutionRunData(state.ExecutionID, &ExecutionRunData{ - stopTicker: make(chan bool), - jobs: make(chan time.Time, state.MaxConcurrent), - metrics: make(chan action_kit_api.Metric, state.MaxConcurrent), - requestCounter: atomic.Uint64{}, - requestSuccessCounter: atomic.Uint64{}, - }) -} - -func saveExecutionRunData(executionID uuid.UUID, executionRunData *ExecutionRunData) { - ExecutionRunDataMap.Store(executionID, executionRunData) -} - -func createRecord(state *KafkaBrokerAttackState) *kgo.Record { - record := kgo.KeyStringRecord(state.RecordKey, state.RecordValue) - record.Topic = state.Topic - - if state.RecordHeaders != nil { - for k, v := range state.RecordHeaders { - record.Headers = append(record.Headers, kgo.RecordHeader{Key: k, Value: []byte(v)}) - } - } - - return record -} - -func requestProducerWorker(executionRunData *ExecutionRunData, state *KafkaBrokerAttackState, checkEnded func(executionRunData *ExecutionRunData, state *KafkaBrokerAttackState) bool) { - client, err := createNewClient(state.BrokerHosts) - if err != nil { - log.Error().Err(err).Msg("Failed to create client") - } - for range executionRunData.jobs { - if !checkEnded(executionRunData, state) { - //var started = time.Now() - rec := createRecord(state) - - //var producedRecord *kgo.Record - _, err = client.ProduceSync(context.Background(), rec).First() - executionRunData.requestCounter.Add(1) - - if err != nil { - log.Error().Err(err).Msg("Failed to produce record") - //now := time.Now() - //executionRunData.metrics <- action_kit_api.Metric{ - // Metric: map[string]string{ - // "topic": rec.Topic, - // "producer": strconv.Itoa(int(rec.ProducerID)), - // "brokers": config.Config.SeedBrokers, - // "error": err.Error(), - // }, - // Name: extutil.Ptr("producer_response_time"), - // Value: float64(now.Sub(started).Milliseconds()), - // Timestamp: now, - //} - } else { - // Successfully produced the 
record - //recordProducerLatency := float64(producedRecord.Timestamp.Sub(started).Milliseconds()) - //metricMap := map[string]string{ - // "topic": rec.Topic, - // "producer": strconv.Itoa(int(rec.ProducerID)), - // "brokers": config.Config.SeedBrokers, - // "error": "", - //} - - executionRunData.requestSuccessCounter.Add(1) - - //metric := action_kit_api.Metric{ - // Name: extutil.Ptr("record_latency"), - // Metric: metricMap, - // Value: recordProducerLatency, - // Timestamp: producedRecord.Timestamp, - //} - //executionRunData.metrics <- metric - } - } - } - err = client.Flush(context.Background()) - if err != nil { - log.Error().Err(err).Msg("Failed to flush") - } - defer client.Close() -} - -//func requestConsumerWorker(executionRunData *ExecutionRunData, state *KafkaBrokerAttackState, checkEnded func(executionRunData *ExecutionRunData, state *KafkaBrokerAttackState) bool) { -// opts := []kgo.Opt{ -// kgo.SeedBrokers(config.Config.SeedBrokers), -// kgo.ConsumerGroup("steadybit-extension-kafka"), -// kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()), -// kgo.ConsumeTopics(state.Topic), -// } -// -// client, err := kgo.NewClient(opts...) -// if err != nil { -// log.Error().Err(err).Msg("Failed to create client") -// } -// defer client.Close() -// -// if !checkEnded(executionRunData, state) { -// for range executionRunData.jobs { -// // Poll for records -// fetches := client.PollRecords(context.TODO(), 1) -// if fetches.IsClientClosed() { -// break -// } -// -// // Handle errors -// if errs := fetches.Errors(); len(errs) > 0 { -// for _, e := range errs { -// log.Error().Err(e.Err).Msgf("Error consuming from topic %s partition %d", e.Topic, e.Partition) -// return -// } -// } -// executionRunData.requestCounter.Add(1) -// -// // Process the records -// fetches.EachRecord(func(record *kgo.Record) { -// log.Debug().Msgf("Topic: %s, Partition: %d, Offset: %d\n", -// record.Topic, record.Partition, record.Offset) -// log.Debug().Msgf("Key: %s\n", string(record.Key)) -// log.Debug().Msgf("Value: %s\n", string(record.Value)) -// log.Debug().Msgf("Timestamp: %s\n", record.Timestamp) -// log.Debug().Msgf("---") -// -// executionRunData.requestSuccessCounter.Add(1) -// return -// }) -// } -// } -//} - -func start(state *KafkaBrokerAttackState) { - executionRunData, err := loadExecutionRunData(state.ExecutionID) - if err != nil { - log.Error().Err(err).Msg("Failed to load execution run data") - } - executionRunData.tickers = time.NewTicker(time.Duration(state.DelayBetweenRequestsInMS) * time.Millisecond) - executionRunData.stopTicker = make(chan bool) - - now := time.Now() - log.Debug().Msgf("Schedule first record at %v", now) - executionRunData.jobs <- now - go func() { - for { - select { - case <-executionRunData.stopTicker: - log.Debug().Msg("Stop Record Scheduler") - return - case t := <-executionRunData.tickers.C: - log.Debug().Msgf("Schedule Record at %v", t) - executionRunData.jobs <- t - } - } - }() - ExecutionRunDataMap.Store(state.ExecutionID, executionRunData) -} - -func retrieveLatestMetrics(metrics chan action_kit_api.Metric) []action_kit_api.Metric { - - statusMetrics := make([]action_kit_api.Metric, 0, len(metrics)) - for { - select { - case metric, ok := <-metrics: - if ok { - log.Debug().Msgf("Status Metric: %v", metric) - statusMetrics = append(statusMetrics, metric) - } else { - log.Debug().Msg("Channel closed") - return statusMetrics - } - default: - log.Debug().Msg("No metrics available") - return statusMetrics - } - } -} - -func stop(state *KafkaBrokerAttackState) 
(*action_kit_api.StopResult, error) { - executionRunData, err := loadExecutionRunData(state.ExecutionID) - if err != nil { - log.Debug().Err(err).Msg("Execution run data not found, stop was already called") - return nil, nil - } - stopTickers(executionRunData) - - latestMetrics := retrieveLatestMetrics(executionRunData.metrics) - // calculate the success rate - successRate := float64(executionRunData.requestSuccessCounter.Load()) / float64(executionRunData.requestCounter.Load()) * 100 - log.Debug().Msgf("Success Rate: %v%%", successRate) - ExecutionRunDataMap.Delete(state.ExecutionID) - if successRate < float64(state.SuccessRate) { - log.Info().Msgf("Success Rate (%.2f%%) was below %v%%", successRate, state.SuccessRate) - return extutil.Ptr(action_kit_api.StopResult{ - Metrics: extutil.Ptr(latestMetrics), - Error: &action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Success Rate (%.2f%%) was below %v%%", successRate, state.SuccessRate), - Status: extutil.Ptr(action_kit_api.Failed), - }, - }), nil - } - log.Info().Msgf("Success Rate (%.2f%%) was above/equal %v%%", successRate, state.SuccessRate) - return extutil.Ptr(action_kit_api.StopResult{ - Metrics: extutil.Ptr(latestMetrics), - }), nil -} - -func stopTickers(executionRunData *ExecutionRunData) { - ticker := executionRunData.tickers - if ticker != nil { - ticker.Stop() - } - // non-blocking send - select { - case executionRunData.stopTicker <- true: // stop the ticker - log.Trace().Msg("Stopped ticker") - default: - log.Debug().Msg("Ticker already stopped") - } -} diff --git a/extkafka/produceFixAmount.go b/extkafka/produceFixAmount.go deleted file mode 100644 index 343a862..0000000 --- a/extkafka/produceFixAmount.go +++ /dev/null @@ -1,176 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "errors" - "fmt" - "github.com/google/uuid" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" -) - -type produceMessageActionFixedAmount struct{} - -// Make sure Action implements all required interfaces -var ( - _ action_kit_sdk.Action[KafkaBrokerAttackState] = (*produceMessageActionFixedAmount)(nil) - _ action_kit_sdk.ActionWithStatus[KafkaBrokerAttackState] = (*produceMessageActionFixedAmount)(nil) - - _ action_kit_sdk.ActionWithStop[KafkaBrokerAttackState] = (*produceMessageActionFixedAmount)(nil) -) - -func NewProduceMessageActionFixedAmount() action_kit_sdk.Action[KafkaBrokerAttackState] { - return &produceMessageActionFixedAmount{} -} - -func (l *produceMessageActionFixedAmount) NewEmptyState() KafkaBrokerAttackState { - return KafkaBrokerAttackState{} -} - -// Describe returns the action description for the platform with all required information. 
-func (l *produceMessageActionFixedAmount) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.produce-fixed-amount", kafkaTopicTargetId), - Label: "Produce (# of Records)", - Description: "Produce a certain number of Kafka records over a given duration", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaTopicTargetId, - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "topic name", - Description: extutil.Ptr("Find topic by name"), - Query: "kafka.topic.name=\"\"", - }, - }), - }), - //Widgets: extutil.Ptr([]action_kit_api.Widget{ - // action_kit_api.PredefinedWidget{ - // Type: action_kit_api.ComSteadybitWidgetPredefined, - // PredefinedWidgetId: "com.steadybit.widget.predefined.HttpCheck", - // }, - //}), - - // Technology for the targets to appear in - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - - // To clarify the purpose of the action: - // Attack: Will attack the targets - Kind: action_kit_api.Attack, - - // How the action is controlled over time. - // External: The agent takes care and calls stop when the time has passed. Requires a duration parameter. Use this when the duration is known in advance. - // Internal: The action has to implement the status endpoint to signal when the action is done. Use this when the duration is not known in advance. - // Instantaneous: The action is done immediately. Use this for actions that happen immediately, e.g. a reboot. - TimeControl: action_kit_api.TimeControlInternal, - - // The parameters for the action - Parameters: []action_kit_api.ActionParameter{ - //------------------------ - // Request Definition - //------------------------ - recordKey, - recordValue, - recordHeaders, - { - Name: "-", - Label: "-", - Type: action_kit_api.ActionParameterTypeSeparator, - Order: extutil.Ptr(5), - }, - { - Name: "numberOfRecords", - Label: "Number of Records", - Description: extutil.Ptr("Fixed number of records, distributed over the given duration"), - Type: action_kit_api.ActionParameterTypeInteger, - Required: extutil.Ptr(true), - DefaultValue: extutil.Ptr("1"), - }, - duration, - { - Name: "-", - Label: "-", - Type: action_kit_api.ActionParameterTypeSeparator, - Order: extutil.Ptr(9), - }, - successRate, - //------------------------ - // Additional Settings - //------------------------ - - maxConcurrent, - }, - Status: extutil.Ptr(action_kit_api.MutatingEndpointReferenceWithCallInterval{ - CallInterval: extutil.Ptr("1s"), - }), - Stop: extutil.Ptr(action_kit_api.MutatingEndpointReference{}), - } -} - -func getDelayBetweenRequestsInMsFixedAmount(duration int64, numberOfRequests int64) int64 { - if duration > 0 && numberOfRequests > 0 { - return duration / (numberOfRequests) - } else { - return 1000 / 1 - } -} - -func (l *produceMessageActionFixedAmount) Prepare(_ context.Context, state *KafkaBrokerAttackState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - if extutil.ToInt64(request.Config["duration"]) == 0 { - return nil, errors.New("duration must be greater than 0") - } - state.DelayBetweenRequestsInMS = getDelayBetweenRequestsInMsFixedAmount(extutil.ToInt64(request.Config["duration"]), extutil.ToInt64(request.Config["numberOfRecords"])) - - return prepare(request, state, checkEndedProduceFixedAmount) -} - -func checkEndedProduceFixedAmount(executionRunData
*ExecutionRunData, state *KafkaBrokerAttackState) bool { - result := executionRunData.requestCounter.Load() >= state.NumberOfRecords - return result -} - -// Start is called to start the action -// You can mutate the state here. -// You can use the result to return messages/errors/metrics or artifacts -func (l *produceMessageActionFixedAmount) Start(_ context.Context, state *KafkaBrokerAttackState) (*action_kit_api.StartResult, error) { - start(state) - return nil, nil -} - -// Status is called to get the current status of the action -func (l *produceMessageActionFixedAmount) Status(_ context.Context, state *KafkaBrokerAttackState) (*action_kit_api.StatusResult, error) { - executionRunData, err := loadExecutionRunData(state.ExecutionID) - if err != nil { - log.Error().Err(err).Msg("Failed to load execution run data") - return nil, err - } - - completed := checkEndedProduceFixedAmount(executionRunData, state) - if completed { - stopTickers(executionRunData) - log.Info().Msg("Action completed") - } - - latestMetrics := retrieveLatestMetrics(executionRunData.metrics) - - return &action_kit_api.StatusResult{ - Completed: completed, - Metrics: extutil.Ptr(latestMetrics), - }, nil -} - -func (l *produceMessageActionFixedAmount) Stop(_ context.Context, state *KafkaBrokerAttackState) (*action_kit_api.StopResult, error) { - return stop(state) -} - -func (l *produceMessageActionFixedAmount) getExecutionRunData(executionID uuid.UUID) (*ExecutionRunData, error) { - return loadExecutionRunData(executionID) -} diff --git a/extkafka/produceFixAmount_test.go b/extkafka/produceFixAmount_test.go deleted file mode 100644 index 8510b3a..0000000 --- a/extkafka/produceFixAmount_test.go +++ /dev/null @@ -1,221 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "github.com/google/uuid" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kit/extutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/twmb/franz-go/pkg/kfake" - "strings" - "testing" - "time" -) - -func TestNewHTTPCheckActionFixedAmount_Prepare(t *testing.T) { - action := produceMessageActionFixedAmount{} - tests := []struct { - name string - requestBody action_kit_api.PrepareActionRequestBody - wantedError error - wantedState *KafkaBrokerAttackState - }{ - { - name: "Should return config", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.topic.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "numberOfRecords": 10, - "maxConcurrent": 4, - "recordKey": "steadybit5", - "recordValue": "test5", - "recordHeaders": []any{ - map[string]any{"key": "test", "value": "test"}, - }, - "duration": 10000, - }, - ExecutionId: uuid.New(), - }), - wantedState: &KafkaBrokerAttackState{ - ConsumerGroup: "", - Topic: "steadybit", - RecordKey: "steadybit5", - RecordValue: "test5", - MaxConcurrent: 4, - NumberOfRecords: 10, - RecordHeaders: map[string]string{"test": "test"}, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - state := action.NewEmptyState() - request := tt.requestBody - - //When - _, err := action.Prepare(t.Context(), &state, request) - - //Then - if tt.wantedError != nil { - assert.EqualError(t, err, tt.wantedError.Error()) - } - if tt.wantedState != nil { - assert.NoError(t, err) - 
assert.NoError(t, err) - assert.Equal(t, tt.wantedState.RecordHeaders, state.RecordHeaders) - assert.Equal(t, tt.wantedState.MaxConcurrent, state.MaxConcurrent) - assert.Equal(t, tt.wantedState.NumberOfRecords, state.NumberOfRecords) - assert.Equal(t, tt.wantedState.SuccessRate, state.SuccessRate) - assert.NotNil(t, state.ExecutionID) - assert.NotNil(t, state.Timeout) - } - }) - } -} - -func TestNewHTTPCheckActionFixedAmount_All_Success(t *testing.T) { - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(1), - ) - require.NoError(t, err) - defer c.Close() - - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - - action := produceMessageActionFixedAmount{} - state := action.NewEmptyState() - prepareActionRequestBody := extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.topic.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "numberOfRecords": 10, - "maxConcurrent": 4, - "recordKey": "steadybit5", - "recordValue": "test5", - "recordHeaders": []any{ - map[string]any{"key": "test", "value": "test"}, - }, - "recordAttributes": "243", - "duration": 10000, - }, - ExecutionId: uuid.New(), - }) - - // Prepare - prepareResult, err := action.Prepare(t.Context(), &state, prepareActionRequestBody) - assert.NoError(t, err) - assert.Nil(t, prepareResult) - assert.Greater(t, state.DelayBetweenRequestsInMS, extutil.ToInt64(0)) - - executionRunData, err := action.getExecutionRunData(state.ExecutionID) - assert.NoError(t, err) - assert.NotNil(t, executionRunData) - - // Start - startResult, err := action.Start(t.Context(), &state) - assert.NoError(t, err) - assert.Nil(t, startResult) - - // Status - statusResult, err := action.Status(t.Context(), &state) - assert.NoError(t, err) - assert.NotNil(t, statusResult.Metrics) - - time.Sleep(10 * time.Second) - - // Status completed - statusResult, err = action.Status(t.Context(), &state) - assert.NoError(t, err) - assert.Equal(t, true, statusResult.Completed) - - assert.Equal(t, uint64(10), executionRunData.requestCounter.Load()) - - // Stop - stopResult, err := action.Stop(t.Context(), &state) - assert.NoError(t, err) - assert.NotNil(t, stopResult.Metrics) - assert.Nil(t, stopResult.Error) - assert.Equal(t, executionRunData.requestSuccessCounter.Load(), uint64(10)) -} - -func TestNewHTTPCheckActionFixedAmount_All_Failure(t *testing.T) { - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(1), - ) - require.NoError(t, err) - defer c.Close() - - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - - action := produceMessageActionFixedAmount{} - state := action.NewEmptyState() - prepareActionRequestBody := extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.topic.name": {"invalid"}, - }, - }, - Config: map[string]interface{}{ - "numberOfRecords": 1, - "maxConcurrent": 4, - "recordKey": "steadybit5", - "recordValue": "test5", - "recordHeaders": []any{ - map[string]any{"key": "test", "value": "test"}, - }, - "duration": 10000, - "successRate": 100, - }, - ExecutionId: uuid.New(), - }) - - // Prepare - prepareResult, err := action.Prepare(t.Context(), &state, prepareActionRequestBody) - assert.NoError(t, err) - assert.Nil(t, prepareResult) - assert.Greater(t, state.DelayBetweenRequestsInMS, extutil.ToInt64(0)) - - // Start - startResult, err := 
action.Start(t.Context(), &state) - assert.NoError(t, err) - assert.Nil(t, startResult) - - // Status - statusResult, err := action.Status(t.Context(), &state) - assert.NoError(t, err) - assert.NotNil(t, statusResult.Metrics) - - time.Sleep(5 * time.Second) - - // Status completed - statusResult, err = action.Status(t.Context(), &state) - assert.NoError(t, err) - assert.Equal(t, true, statusResult.Completed) - - executionRunData, err := action.getExecutionRunData(state.ExecutionID) - assert.NoError(t, err) - assert.Greater(t, executionRunData.requestCounter.Load(), uint64(0)) - // Stop - stopResult, err := action.Stop(t.Context(), &state) - assert.NoError(t, err) - assert.NotNil(t, stopResult.Metrics) - assert.NotNil(t, stopResult.Error) - assert.Equal(t, stopResult.Error.Title, "Success Rate (0.00%) was below 100%") - assert.Equal(t, executionRunData.requestSuccessCounter.Load(), uint64(0)) -} diff --git a/extkafka/producePeriodically.go b/extkafka/producePeriodically.go deleted file mode 100644 index 53fbc2d..0000000 --- a/extkafka/producePeriodically.go +++ /dev/null @@ -1,158 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/google/uuid" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" -) - -type produceMessageActionPeriodically struct{} - -// Make sure Action implements all required interfaces -var ( - _ action_kit_sdk.Action[KafkaBrokerAttackState] = (*produceMessageActionPeriodically)(nil) - _ action_kit_sdk.ActionWithStatus[KafkaBrokerAttackState] = (*produceMessageActionPeriodically)(nil) - - _ action_kit_sdk.ActionWithStop[KafkaBrokerAttackState] = (*produceMessageActionPeriodically)(nil) -) - -func NewProduceMessageActionPeriodically() action_kit_sdk.Action[KafkaBrokerAttackState] { - return &produceMessageActionPeriodically{} -} - -func (l *produceMessageActionPeriodically) NewEmptyState() KafkaBrokerAttackState { - return KafkaBrokerAttackState{} -} - -// Describe returns the action description for the platform with all required information. -func (l *produceMessageActionPeriodically) Describe() action_kit_api.ActionDescription { - return action_kit_api.ActionDescription{ - Id: fmt.Sprintf("%s.produce-periodically", kafkaTopicTargetId), - Label: "Produce (Records / s)", - Description: "Produce records periodically (records per second)", - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - TargetSelection: extutil.Ptr(action_kit_api.TargetSelection{ - TargetType: kafkaTopicTargetId, - SelectionTemplates: extutil.Ptr([]action_kit_api.TargetSelectionTemplate{ - { - Label: "topic name", - Description: extutil.Ptr("Find topic by name"), - Query: "kafka.topic.name=\"\"", - }, - }), - }), - //Widgets: extutil.Ptr([]action_kit_api.Widget{ - // action_kit_api.PredefinedWidget{ - // Type: action_kit_api.ComSteadybitWidgetPredefined, - // PredefinedWidgetId: "com.steadybit.widget.predefined.HttpCheck", - // }, - //}), - Technology: extutil.Ptr("Kafka"), - Category: extutil.Ptr("Kafka"), - // To clarify the purpose of the action: - // Check: Will perform checks on the targets - Kind: action_kit_api.Attack, - - // How the action is controlled over time. - // External: The agent takes care and calls stop then the time has passed. Requires a duration parameter. 
Use this when the duration is known in advance. - // Internal: The action has to implement the status endpoint to signal when the action is done. Use this when the duration is not known in advance. - // Instantaneous: The action is done immediately. Use this for actions that happen immediately, e.g. a reboot. - TimeControl: action_kit_api.TimeControlExternal, - - // The parameters for the action - Parameters: []action_kit_api.ActionParameter{ - //------------------------ - // Request Definition - //------------------------ - recordKey, - recordValue, - recordHeaders, - { - Name: "-", - Label: "-", - Type: action_kit_api.ActionParameterTypeSeparator, - Order: extutil.Ptr(5), - }, - { - Name: "recordsPerSecond", - Label: "Records per second", - Description: extutil.Ptr("The number of records per second. Should be between 1 and 10."), - Type: action_kit_api.ActionParameterTypeInteger, - DefaultValue: extutil.Ptr("1"), - MinValue: extutil.Ptr(1), - MaxValue: extutil.Ptr(10), - Required: extutil.Ptr(true), - }, - duration, - { - Name: "-", - Label: "-", - Type: action_kit_api.ActionParameterTypeSeparator, - Order: extutil.Ptr(9), - }, - successRate, - - //------------------------ - // Additional Settings - //------------------------ - - maxConcurrent, - }, - Status: extutil.Ptr(action_kit_api.MutatingEndpointReferenceWithCallInterval{ - CallInterval: extutil.Ptr("1s"), - }), - Stop: extutil.Ptr(action_kit_api.MutatingEndpointReference{}), - } -} - -func getDelayBetweenRequestsInMsPeriodically(recordsPerSecond int64) int64 { - if recordsPerSecond > 0 { - return 1000 / recordsPerSecond - } else { - return 1000 / 1 - } -} - -func (l *produceMessageActionPeriodically) Prepare(_ context.Context, state *KafkaBrokerAttackState, request action_kit_api.PrepareActionRequestBody) (*action_kit_api.PrepareResult, error) { - state.DelayBetweenRequestsInMS = getDelayBetweenRequestsInMsPeriodically(extutil.ToInt64(request.Config["recordsPerSecond"])) - return prepare(request, state, func(executionRunData *ExecutionRunData, state *KafkaBrokerAttackState) bool { return false }) -} - -// Start is called to start the action -// You can mutate the state here. 
-// You can use the result to return messages/errors/metrics or artifacts -func (l *produceMessageActionPeriodically) Start(_ context.Context, state *KafkaBrokerAttackState) (*action_kit_api.StartResult, error) { - start(state) - return nil, nil -} - -// Status is called to get the current status of the action -func (l *produceMessageActionPeriodically) Status(_ context.Context, state *KafkaBrokerAttackState) (*action_kit_api.StatusResult, error) { - executionRunData, err := loadExecutionRunData(state.ExecutionID) - if err != nil { - log.Error().Err(err).Msg("Failed to load execution run data") - return nil, err - } - latestMetrics := retrieveLatestMetrics(executionRunData.metrics) - return &action_kit_api.StatusResult{ - Completed: false, - Metrics: extutil.Ptr(latestMetrics), - }, nil -} - -func (l *produceMessageActionPeriodically) Stop(_ context.Context, state *KafkaBrokerAttackState) (*action_kit_api.StopResult, error) { - return stop(state) -} - -func (l *produceMessageActionPeriodically) getExecutionRunData(executionID uuid.UUID) (*ExecutionRunData, error) { - return loadExecutionRunData(executionID) -} diff --git a/extkafka/producePeriodically_test.go b/extkafka/producePeriodically_test.go deleted file mode 100644 index 60b909b..0000000 --- a/extkafka/producePeriodically_test.go +++ /dev/null @@ -1,252 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "github.com/google/uuid" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/extension-kafka/config" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/twmb/franz-go/pkg/kfake" - "strings" - "testing" - "time" -) - -func TestNewProduceMessageActionPeriodically_Describe(t *testing.T) { - action := produceMessageActionPeriodically{} - - description := action.Describe() - - assert.Equal(t, "Produce (Records / s)", description.Label) - assert.Equal(t, "Produce records periodically (records per second)", description.Description) - assert.Equal(t, kafkaTopicTargetId, description.TargetSelection.TargetType) -} - -func TestNewProduceMessageActionPeriodically_Prepare(t *testing.T) { - action := produceMessageActionPeriodically{} - - tests := []struct { - name string - requestBody action_kit_api.PrepareActionRequestBody - wantedError error - wantedState *KafkaBrokerAttackState - }{ - { - name: "Should return config", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.topic.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "recordsPerSecond": 1, - "maxConcurrent": 2, - "recordKey": "steadybit5", - "recordValue": "test5", - "recordHeaders": []any{ - map[string]any{"key": "test", "value": "test"}, - }, - "duration": 10000, - }, - ExecutionId: uuid.New(), - }), - wantedState: &KafkaBrokerAttackState{ - ConsumerGroup: "", - Topic: "steadybit", - RecordKey: "steadybit5", - RecordValue: "test5", - MaxConcurrent: 2, - DelayBetweenRequestsInMS: 1000, - RecordHeaders: map[string]string{"test": "test"}, - }, - }, - { - name: "Should return error", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{}, - }, - Config: map[string]interface{}{ - "recordsPerSecond": 1, - "maxConcurrent": 2, - "recordKey": 
"steadybit5", - "recordValue": "test5", - "recordHeaders": []any{ - map[string]any{"key": "test", "value": "test"}, - }, - "duration": 10000, - }, - ExecutionId: uuid.New(), - }), - wantedError: extension_kit.ToError("the target is missing the kafka.topic.name attribute", nil), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - state := action.NewEmptyState() - request := tt.requestBody - - //When - _, err := action.Prepare(t.Context(), &state, request) - - //Then - if tt.wantedError != nil { - assert.EqualError(t, err, tt.wantedError.Error()) - } - if tt.wantedState != nil { - assert.NoError(t, err) - assert.NoError(t, err) - assert.Equal(t, tt.wantedState.RecordHeaders, state.RecordHeaders) - assert.Equal(t, tt.wantedState.MaxConcurrent, state.MaxConcurrent) - assert.Equal(t, tt.wantedState.DelayBetweenRequestsInMS, state.DelayBetweenRequestsInMS) - assert.Equal(t, tt.wantedState.SuccessRate, state.SuccessRate) - assert.NotNil(t, state.ExecutionID) - assert.NotNil(t, state.Timeout) - } - }) - } -} - -func TestNewHTTPCheckActionPeriodically_All_Success(t *testing.T) { - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(1), - ) - require.NoError(t, err) - defer c.Close() - - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - - action := produceMessageActionPeriodically{} - state := action.NewEmptyState() - prepareActionRequestBody := extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.topic.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "recordsPerSecond": 1, - "maxConcurrent": 2, - "recordKey": "steadybit5", - "recordValue": "test5", - "recordHeaders": []any{ - map[string]any{"key": "test", "value": "test"}, - }, - "duration": 10000, - }, - ExecutionId: uuid.New(), - }) - - // Prepare - prepareResult, err := action.Prepare(t.Context(), &state, prepareActionRequestBody) - assert.NoError(t, err) - assert.Nil(t, prepareResult) - assert.Greater(t, state.DelayBetweenRequestsInMS, extutil.ToInt64(0)) - - executionRunData, err := action.getExecutionRunData(state.ExecutionID) - assert.NoError(t, err) - assert.NotNil(t, executionRunData) - - // Start - startResult, err := action.Start(t.Context(), &state) - assert.NoError(t, err) - assert.Nil(t, startResult) - - // Status - statusResult, err := action.Status(t.Context(), &state) - assert.NoError(t, err) - assert.NotNil(t, statusResult.Metrics) - - time.Sleep(10 * time.Second) - - // Status completed - statusResult, err = action.Status(t.Context(), &state) - assert.NoError(t, err) - assert.Equal(t, false, statusResult.Completed) - - // Stop - stopResult, err := action.Stop(t.Context(), &state) - assert.NoError(t, err) - assert.NotNil(t, stopResult.Metrics) - assert.Nil(t, stopResult.Error) - assert.Greater(t, executionRunData.requestSuccessCounter.Load(), uint64(9)) -} - -func TestNewHTTPCheckActionPeriodically_All_Failure(t *testing.T) { - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(1), - ) - require.NoError(t, err) - defer c.Close() - - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - - action := produceMessageActionPeriodically{} - state := action.NewEmptyState() - prepareActionRequestBody := extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.topic.name": {"invalid"}, - }, - }, - 
Config: map[string]interface{}{ - "recordsPerSecond": 1, - "maxConcurrent": 1, - "recordKey": "steadybit5", - "recordValue": "test5", - "recordHeaders": []any{ - map[string]any{"key": "test", "value": "test"}, - }, - "recordAttributes": "243", - "duration": 10000, - "successRate": 100, - }, - ExecutionId: uuid.New(), - }) - - // Prepare - prepareResult, err := action.Prepare(t.Context(), &state, prepareActionRequestBody) - assert.NoError(t, err) - assert.Nil(t, prepareResult) - assert.Greater(t, state.DelayBetweenRequestsInMS, extutil.ToInt64(0)) - - // Start - startResult, err := action.Start(t.Context(), &state) - assert.NoError(t, err) - assert.Nil(t, startResult) - - // Status - statusResult, err := action.Status(t.Context(), &state) - assert.NoError(t, err) - assert.NotNil(t, statusResult.Metrics) - - time.Sleep(5 * time.Second) - - // Status completed - statusResult, err = action.Status(t.Context(), &state) - assert.NoError(t, err) - assert.Equal(t, statusResult.Completed, false) - - executionRunData, err := action.getExecutionRunData(state.ExecutionID) - assert.NoError(t, err) - assert.Greater(t, executionRunData.requestCounter.Load(), uint64(0)) - - // Stop - stopResult, err := action.Stop(t.Context(), &state) - assert.NoError(t, err) - assert.NotNil(t, stopResult.Metrics) - assert.NotNil(t, stopResult.Error) - assert.Equal(t, stopResult.Error.Title, "Success Rate (0.00%) was below 100%") - assert.Equal(t, executionRunData.requestSuccessCounter.Load(), uint64(0)) -} diff --git a/extkafka/produce_test.go b/extkafka/produce_test.go deleted file mode 100644 index f72188a..0000000 --- a/extkafka/produce_test.go +++ /dev/null @@ -1,173 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "github.com/google/uuid" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - extension_kit "github.com/steadybit/extension-kit" - "github.com/steadybit/extension-kit/extutil" - "github.com/stretchr/testify/assert" - "sync/atomic" - "testing" -) - -func TestAction_Prepare(t *testing.T) { - tests := []struct { - name string - requestBody action_kit_api.PrepareActionRequestBody - wantedError error - wantedState *KafkaBrokerAttackState - }{ - { - name: "Should return config", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.topic.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "numberOfRecords": 10, - "maxConcurrent": 4, - "recordKey": "steadybit5", - "recordValue": "test5", - "recordHeaders": []any{ - map[string]any{"key": "test", "value": "test"}, - }, - "recordAttributes": "243", - "duration": 10000, - }, - ExecutionId: uuid.New(), - }), - wantedState: &KafkaBrokerAttackState{ - ConsumerGroup: "", - Topic: "steadybit", - RecordKey: "steadybit5", - RecordValue: "test5", - MaxConcurrent: 4, - NumberOfRecords: 10, - RecordHeaders: map[string]string{"test": "test"}, - }, - }, - { - name: "Should return error for recordHeaders", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.topic.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "action": "prepare", - "maxConcurrent": 4, - "recordHeaders": "test:test", - }, - ExecutionId: uuid.New(), - }), - wantedError: extension_kit.ToError("failed to interpret config value for recordHeaders as a key/value array", nil), - }, - { - name: "Should return 
error for maxConcurrent", - requestBody: extutil.JsonMangle(action_kit_api.PrepareActionRequestBody{ - Target: &action_kit_api.Target{ - Attributes: map[string][]string{ - "kafka.topic.name": {"steadybit"}, - }, - }, - Config: map[string]interface{}{ - "action": "prepare", - "maxConcurrent": 0, - }, - ExecutionId: uuid.New(), - }), - wantedError: extension_kit.ToError("max concurrent can't be zero", nil), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - state := KafkaBrokerAttackState{} - request := tt.requestBody - - //When - _, err := prepare(request, &state, func(executionRunData *ExecutionRunData, state *KafkaBrokerAttackState) bool { return false }) - - //Then - if tt.wantedError != nil { - assert.EqualError(t, err, tt.wantedError.Error()) - } - if tt.wantedState != nil { - assert.NoError(t, err) - assert.Equal(t, tt.wantedState.RecordHeaders, state.RecordHeaders) - assert.Equal(t, tt.wantedState.MaxConcurrent, state.MaxConcurrent) - assert.Equal(t, tt.wantedState.NumberOfRecords, state.NumberOfRecords) - assert.Equal(t, tt.wantedState.SuccessRate, state.SuccessRate) - assert.NotNil(t, state.ExecutionID) - assert.NotNil(t, state.Timeout) - } - }) - } -} - -func TestAction_Stop(t *testing.T) { - tests := []struct { - name string - requestBody action_kit_api.StopActionRequestBody - state *KafkaBrokerAttackState - executionRunData *ExecutionRunData - wantedError error - }{ - { - name: "Should successfully stop the action", - requestBody: extutil.JsonMangle(action_kit_api.StopActionRequestBody{}), - state: &KafkaBrokerAttackState{ - ExecutionID: uuid.New(), - SuccessRate: 40, - }, - executionRunData: getExecutionRunData(5, 10), - wantedError: nil, - }, { - name: "Should fail because of low success rate", - requestBody: extutil.JsonMangle(action_kit_api.StopActionRequestBody{}), - state: &KafkaBrokerAttackState{ - ExecutionID: uuid.New(), - SuccessRate: 100, - }, - executionRunData: getExecutionRunData(4, 11), - wantedError: extension_kit.ToError("Success Rate (36.36%) was below 100%", nil), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - //Given - saveExecutionRunData(tt.state.ExecutionID, tt.executionRunData) - - //When - result, err := stop(tt.state) - - //Then - if tt.wantedError != nil && result.Error == nil { - assert.EqualError(t, err, tt.wantedError.Error()) - } else if tt.wantedError != nil && result.Error != nil { - assert.Equal(t, tt.wantedError.Error(), result.Error.Title) - } else if tt.wantedError == nil && result.Error != nil { - assert.Fail(t, "Should not have error", result.Error.Title) - } else { - assert.NoError(t, err) - } - }) - } -} - -func getExecutionRunData(successCounter uint64, counter uint64) *ExecutionRunData { - data := &ExecutionRunData{ - requestSuccessCounter: atomic.Uint64{}, - requestCounter: atomic.Uint64{}, - } - data.requestCounter.Store(counter) - data.requestSuccessCounter.Store(successCounter) - return data - -} diff --git a/extkafka/topic_discovery.go b/extkafka/topic_discovery.go deleted file mode 100644 index d83a568..0000000 --- a/extkafka/topic_discovery.go +++ /dev/null @@ -1,190 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2024 Steadybit GmbH - -package extkafka - -import ( - "context" - "fmt" - "github.com/steadybit/discovery-kit/go/discovery_kit_api" - "github.com/steadybit/discovery-kit/go/discovery_kit_commons" - "github.com/steadybit/discovery-kit/go/discovery_kit_sdk" - "github.com/steadybit/extension-kafka/config" - 
"github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/extutil" - "github.com/twmb/franz-go/pkg/kadm" - "strconv" - "strings" - "time" -) - -type kafkaTopicDiscovery struct { -} - -var ( - _ discovery_kit_sdk.TargetDescriber = (*kafkaTopicDiscovery)(nil) - _ discovery_kit_sdk.AttributeDescriber = (*kafkaTopicDiscovery)(nil) -) - -func NewKafkaTopicDiscovery(ctx context.Context) discovery_kit_sdk.TargetDiscovery { - discovery := &kafkaTopicDiscovery{} - return discovery_kit_sdk.NewCachedTargetDiscovery(discovery, - discovery_kit_sdk.WithRefreshTargetsNow(), - discovery_kit_sdk.WithRefreshTargetsInterval(ctx, time.Duration(config.Config.DiscoveryIntervalKafkaTopic)*time.Second), - ) -} - -func (r *kafkaTopicDiscovery) Describe() discovery_kit_api.DiscoveryDescription { - return discovery_kit_api.DiscoveryDescription{ - Id: kafkaTopicTargetId, - Discover: discovery_kit_api.DescribingEndpointReferenceWithCallInterval{ - CallInterval: extutil.Ptr(fmt.Sprintf("%ds", config.Config.DiscoveryIntervalKafkaTopic)), - }, - } -} - -func (r *kafkaTopicDiscovery) DescribeTarget() discovery_kit_api.TargetDescription { - return discovery_kit_api.TargetDescription{ - Id: kafkaTopicTargetId, - Label: discovery_kit_api.PluralLabel{One: "Kafka topic", Other: "Kafka topics"}, - Category: extutil.Ptr("kafka"), - Version: extbuild.GetSemverVersionStringOrUnknown(), - Icon: extutil.Ptr(kafkaIcon), - Table: discovery_kit_api.Table{ - Columns: []discovery_kit_api.Column{ - {Attribute: "steadybit.label"}, - {Attribute: "kafka.topic.name"}, - {Attribute: "kafka.topic.partitions-leaders"}, - {Attribute: "kafka.topic.partitions-replicas"}, - {Attribute: "kafka.topic.partitions-isr"}, - {Attribute: "kafka.topic.replication-factor"}, - }, - OrderBy: []discovery_kit_api.OrderBy{ - { - Attribute: "steadybit.label", - Direction: "ASC", - }, - }, - }, - } -} - -func (r *kafkaTopicDiscovery) DescribeAttributes() []discovery_kit_api.AttributeDescription { - return []discovery_kit_api.AttributeDescription{ - { - Attribute: "kafka.topic.name", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka topic name", - Other: "Kafka topic names", - }, - }, - { - Attribute: "kafka.topic.partitions", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka topic partitions", - Other: "Kafka topic partitions", - }, - }, - { - Attribute: "kafka.topic.partitions-leaders", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka topic partitions leaders", - Other: "Kafka topic partitions leaders", - }, - }, - { - Attribute: "kafka.topic.partitions-replicas", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka topic partitions replicas", - Other: "Kafka topic partitions replicas", - }, - }, - { - Attribute: "kafka.topic.partitions-isr", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka topic partitions in-sync-replicas", - Other: "Kafka topic partitions in-sync-replicas", - }, - }, - { - Attribute: "kafka.topic.replication-factor", - Label: discovery_kit_api.PluralLabel{ - One: "Kafka topic replication factor", - Other: "Kafka topic replication factors", - }, - }, - } -} - -func (r *kafkaTopicDiscovery) DiscoverTargets(ctx context.Context) ([]discovery_kit_api.Target, error) { - return getAllTopics(ctx) -} - -func getAllTopics(ctx context.Context) ([]discovery_kit_api.Target, error) { - result := make([]discovery_kit_api.Target, 0, 20) - - client, err := createNewAdminClient(strings.Split(config.Config.SeedBrokers, ",")) - if err != nil { - return nil, fmt.Errorf("failed to initialize kafka client: %s", err.Error()) 
- } - defer client.Close() - - // Create topic "franz-go" if it doesn't exist already - topicDetails, err := client.ListTopics(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list topics: %v", err) - } - metadata, err := client.BrokerMetadata(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get brokers metadata : %v", err) - } - - for _, t := range topicDetails { - if !t.IsInternal { - result = append(result, toTopicTarget(t, metadata.Cluster)) - } - } - - return discovery_kit_commons.ApplyAttributeExcludes(result, config.Config.DiscoveryAttributesExcludesTopics), nil -} - -func toTopicTarget(topic kadm.TopicDetail, clusterName string) discovery_kit_api.Target { - label := topic.Topic - - partitions := make([]string, len(topic.Partitions)) - partitionsLeaders := make([]string, len(topic.Partitions)) - partitionsReplicas := make([]string, len(topic.Partitions)) - partitionsInSyncReplicas := make([]string, len(topic.Partitions)) - - for i, partDetail := range topic.Partitions.Sorted() { - partitions[i] = strconv.FormatInt(int64(partDetail.Partition), 10) - } - - for i, partDetail := range topic.Partitions.Sorted() { - partitionsLeaders[i] = fmt.Sprintf("%d->leader=%d", partDetail.Partition, partDetail.Leader) - } - - for i, partDetail := range topic.Partitions.Sorted() { - partitionsReplicas[i] = fmt.Sprintf("%d->replicas=%v", partDetail.Partition, partDetail.Replicas) - } - - for i, partDetail := range topic.Partitions.Sorted() { - partitionsInSyncReplicas[i] = fmt.Sprintf("%d->in-sync-replicas=%v", partDetail.Partition, partDetail.ISR) - } - - attributes := make(map[string][]string) - attributes["kafka.cluster.name"] = []string{clusterName} - attributes["kafka.topic.name"] = []string{topic.Topic} - attributes["kafka.topic.partitions"] = partitions - attributes["kafka.topic.partitions-leaders"] = partitionsLeaders - attributes["kafka.topic.partitions-replicas"] = partitionsReplicas - attributes["kafka.topic.partitions-isr"] = partitionsInSyncReplicas - attributes["kafka.topic.replication-factor"] = []string{fmt.Sprintf("%v", topic.Partitions.NumReplicas())} - - return discovery_kit_api.Target{ - Id: fmt.Sprintf("%s-%s", label, clusterName), - Label: label, - TargetType: kafkaTopicTargetId, - Attributes: attributes, - } -} diff --git a/extkafka/topic_discovery_test.go b/extkafka/topic_discovery_test.go deleted file mode 100644 index 328edc9..0000000 --- a/extkafka/topic_discovery_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "fmt" - "github.com/steadybit/discovery-kit/go/discovery_kit_api" - "github.com/stretchr/testify/assert" - "reflect" - "testing" - - "github.com/stretchr/testify/require" - "github.com/twmb/franz-go/pkg/kadm" - - "context" - "github.com/steadybit/extension-kafka/config" - "github.com/twmb/franz-go/pkg/kfake" - "strings" -) - -func TestDescribeTopic(t *testing.T) { - desc := (&kafkaTopicDiscovery{}).Describe() - require.Equal(t, kafkaTopicTargetId, desc.Id) - require.NotNil(t, desc.Discover.CallInterval) -} - -func TestDescribeTargetTopic(t *testing.T) { - d := &kafkaTopicDiscovery{} - td := d.DescribeTarget() - - require.Equal(t, kafkaTopicTargetId, td.Id) - require.Equal(t, "Kafka topic", td.Label.One) - require.Equal(t, "Kafka topics", td.Label.Other) - require.NotNil(t, td.Category) - require.Equal(t, "kafka", *td.Category) - require.Len(t, td.Table.Columns, 6) - require.Len(t, td.Table.OrderBy, 1) - - ob := td.Table.OrderBy[0] - 
require.Equal(t, "steadybit.label", ob.Attribute) - require.Equal(t, discovery_kit_api.OrderByDirection("ASC"), ob.Direction) -} - -func TestDescribeAttributesTopic(t *testing.T) { - attrs := (&kafkaTopicDiscovery{}).DescribeAttributes() - expected := []string{ - "kafka.topic.name", - "kafka.topic.partitions", - "kafka.topic.partitions-leaders", - "kafka.topic.partitions-replicas", - "kafka.topic.partitions-isr", - "kafka.topic.replication-factor", - } - - require.Len(t, attrs, len(expected)) - for _, want := range expected { - found := false - for _, a := range attrs { - if a.Attribute == want { - found = true - break - } - } - assert.Truef(t, found, "DescribeAttributes() missing %q", want) - } -} - -func TestToTopicTargetTopic(t *testing.T) { - td := kadm.TopicDetail{ - Topic: "my-topic", - Partitions: kadm.PartitionDetails{ - 1: {Partition: 1, Leader: 101, Replicas: []int32{101, 102}, ISR: []int32{101}}, - 0: {Partition: 0, Leader: 100, Replicas: []int32{100, 102}, ISR: []int32{100, 102}}, - }, - } - cluster := "cluster-42" - tgt := toTopicTarget(td, cluster) - - // Basic fields - assert.Equal(t, "my-topic-cluster-42", tgt.Id) - assert.Equal(t, "my-topic", tgt.Label) - assert.Equal(t, kafkaTopicTargetId, tgt.TargetType) - - // Attributes - attr := tgt.Attributes - check := func(key string, want []string) { - v, ok := attr[key] - assert.True(t, ok, "missing attribute %q", key) - assert.True(t, reflect.DeepEqual(v, want), "%s = %v; want %v", key, v, want) - } - - check("kafka.cluster.name", []string{cluster}) - check("kafka.topic.name", []string{"my-topic"}) - check("kafka.topic.partitions", []string{"0", "1"}) - check("kafka.topic.partitions-leaders", []string{"0->leader=100", "1->leader=101"}) - check( - "kafka.topic.partitions-replicas", - []string{ - fmt.Sprintf("0->replicas=%v", []int{100, 102}), - fmt.Sprintf("1->replicas=%v", []int{101, 102}), - }, - ) - check( - "kafka.topic.partitions-isr", - []string{ - fmt.Sprintf("0->in-sync-replicas=%v", []int{100, 102}), - fmt.Sprintf("1->in-sync-replicas=%v", []int{101}), - }, - ) - check("kafka.topic.replication-factor", []string{"2"}) -} - -// TestDiscoverTargetsClusterName verifies that the kafka.cluster.name attribute -// is correctly set when discovering topics against a fake Kafka cluster. 
-func TestDiscoverTopicTargetsClusterName(t *testing.T) { - c, err := kfake.NewCluster( - kfake.SeedTopics(-1, "steadybit"), - kfake.NumBrokers(1), - kfake.ClusterID("test"), - ) - require.NoError(t, err) - defer c.Close() - - // Configure seed brokers for discovery - seeds := c.ListenAddrs() - config.Config.SeedBrokers = strings.Join(seeds, ",") - - // Ensure no excluded attributes - config.Config.DiscoveryAttributesExcludesTopics = nil - - // Discover targets - ctx := context.Background() - targets, err := getAllTopics(ctx) - require.NoError(t, err) - require.NotEmpty(t, targets) - - // Retrieve expected cluster name from metadata - client, err := createNewAdminClient(strings.Split(config.Config.SeedBrokers, ",")) - require.NoError(t, err) - defer client.Close() - - meta, err := client.BrokerMetadata(ctx) - require.NoError(t, err) - expected := meta.Cluster - - // Assert each discovered target has the correct cluster name attribute - for _, tgt := range targets { - values, ok := tgt.Attributes["kafka.cluster.name"] - require.True(t, ok, "missing kafka.cluster.name for target %s", tgt.Id) - require.Len(t, values, 1) - require.Equal(t, expected, values[0]) - } -} diff --git a/extkafka/util.go b/extkafka/util.go deleted file mode 100644 index 3966979..0000000 --- a/extkafka/util.go +++ /dev/null @@ -1,73 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "fmt" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "strconv" - "strings" -) - -// resolveStatusCodeExpression resolves the given status code expression into a list of status codes -func resolveStatusCodeExpression(statusCodes string) ([]int, *action_kit_api.ActionKitError) { - result := make([]int, 0) - for _, code := range strings.Split(strings.Trim(statusCodes, " "), ";") { - if strings.Contains(code, "-") { - rangeParts := strings.Split(code, "-") - if len(rangeParts) != 2 { - log.Warn().Msgf("Invalid status code range '%s'", code) - return nil, &action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Invalid status code range '%s'. Please use '-' for ranges and ';' for enumerations. Example: '200-399;429'", code), - } - } - startCode, err := strconv.Atoi(rangeParts[0]) - if err != nil { - log.Warn().Msgf("Invalid status code range '%s'", code) - return nil, &action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Invalid status code range '%s'. Please use '-' for ranges and ';' for enumerations. Example: '200-399;429'", code), - } - } - endCode, err := strconv.Atoi(rangeParts[1]) - if err != nil { - log.Warn().Msgf("Invalid status code range '%s'", code) - return nil, &action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Invalid status code range '%s'. Please use '-' for ranges and ';' for enumerations. Example: '200-399;429'", code), - } - } - for i := startCode; i <= endCode; i++ { - if i < 100 || i > 599 { - log.Warn().Msgf("Invalid status code '%d'", i) - return nil, &action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Invalid status code '%d'. Status code should be between 100 and 599.", i), - } - } - result = append(result, i) - } - } else { - if len(code) == 0 { - log.Error().Msgf("Invalid status code '%s'", code) - return nil, &action_kit_api.ActionKitError{ - Title: "Status code is required.", - } - } - parsed, err := strconv.Atoi(code) - if err != nil { - log.Error().Msgf("Invalid status code '%s'", code) - return nil, &action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Invalid status code '%s'. 
Please use '-' for ranges and ';' for enumerations. Example: '200-399;429'", code), - } - } - if parsed < 100 || parsed > 599 { - log.Error().Msgf("Invalid status code '%d'", parsed) - return nil, &action_kit_api.ActionKitError{ - Title: fmt.Sprintf("Invalid status code '%d'. Status code should be between 100 and 599.", parsed), - } - } - result = append(result, parsed) - } - } - return result, nil -} diff --git a/extkafka/util_test.go b/extkafka/util_test.go deleted file mode 100644 index 8e5f7e3..0000000 --- a/extkafka/util_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2025 Steadybit GmbH - -package extkafka - -import ( - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/stretchr/testify/assert" - "testing" -) - -func Test_resolveStatusCodeExpression(t *testing.T) { - type args struct { - statusCodes string - } - tests := []struct { - name string - args args - want []int - error *action_kit_api.ActionKitError - }{ - { - name: "Should return status codes with range", - args: args{ - statusCodes: "200-209", - }, - want: []int{200, 201, 202, 203, 204, 205, 206, 207, 208, 209}, - error: nil, - }, - { - name: "Should return status codes with range and enum", - args: args{ - statusCodes: "201-202;209", - }, - want: []int{201, 202, 209}, - error: nil, - }, - { - name: "Should return error if invalid status code", - args: args{ - statusCodes: "600", - }, - want: nil, - error: &action_kit_api.ActionKitError{Title: "Invalid status code '600'. Status code should be between 100 and 599."}, - }, - { - name: "Should return error if invalid status code range", - args: args{ - statusCodes: "200-", - }, - want: nil, - error: &action_kit_api.ActionKitError{Title: "Invalid status code range '200-'. Please use '-' for ranges and ';' for enumerations. Example: '200-399;429'"}, - }, - { - name: "Should return error if invalid status code range", - args: args{ - statusCodes: "200-;209", - }, - want: nil, - error: &action_kit_api.ActionKitError{Title: "Invalid status code range '200-'. Please use '-' for ranges and ';' for enumerations. Example: '200-399;429'"}, - }, - { - name: "Should return error if invalid status code range", - args: args{ - statusCodes: "200-209;600", - }, - want: nil, - error: &action_kit_api.ActionKitError{Title: "Invalid status code '600'. 
Status code should be between 100 and 599."}, - }, - { - name: "Should return error if status code is empty", - args: args{ - statusCodes: "", - }, - want: nil, - error: &action_kit_api.ActionKitError{Title: "Status code is required."}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, got1 := resolveStatusCodeExpression(tt.args.statusCodes) - assert.Equalf(t, tt.want, got, "resolveStatusCodeExpression(%v)", tt.args.statusCodes) - assert.Equalf(t, tt.error, got1, "resolveStatusCodeExpression(%v)", tt.args.statusCodes) - }) - } -} diff --git a/go.mod b/go.mod deleted file mode 100644 index 9260562..0000000 --- a/go.mod +++ /dev/null @@ -1,98 +0,0 @@ -module github.com/steadybit/extension-kafka - -go 1.24.0 - -toolchain go1.24.1 - -require ( - github.com/KimMachineGun/automemlimit v0.7.4 - github.com/google/uuid v1.6.0 - github.com/kelseyhightower/envconfig v1.4.0 - github.com/rs/zerolog v1.34.0 - github.com/steadybit/action-kit/go/action_kit_api/v2 v2.10.0 - github.com/steadybit/action-kit/go/action_kit_sdk v1.3.0 - github.com/steadybit/action-kit/go/action_kit_test v1.4.2 - github.com/steadybit/advice-kit/go/advice_kit_api v1.2.1 - github.com/steadybit/discovery-kit/go/discovery_kit_api v1.7.0 - github.com/steadybit/discovery-kit/go/discovery_kit_commons v0.3.0 - github.com/steadybit/discovery-kit/go/discovery_kit_sdk v1.3.1 - github.com/steadybit/discovery-kit/go/discovery_kit_test v1.2.0 - github.com/steadybit/event-kit/go/event_kit_api v1.6.0 - github.com/steadybit/extension-kit v1.10.0 - github.com/stretchr/testify v1.11.1 - github.com/twmb/franz-go v1.19.5 - github.com/twmb/franz-go/pkg/kadm v1.16.1 - github.com/twmb/franz-go/pkg/kfake v0.0.0-20250711145744-a849b8be17b7 - go.uber.org/automaxprocs v1.6.0 -) - -require ( - github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/elastic/go-sysinfo v1.15.4 // indirect - github.com/elastic/go-windows v1.0.2 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect - github.com/fxamacker/cbor/v2 v2.8.0 // indirect - github.com/getkin/kin-openapi v0.133.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-openapi/jsonpointer v0.22.1 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.1 // indirect - github.com/go-openapi/swag/jsonname v0.25.1 // indirect - github.com/go-resty/resty/v2 v2.16.5 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/gnostic-models v0.6.9 // indirect - github.com/google/go-cmp v0.7.0 // indirect - github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.0 // indirect - github.com/mailru/easyjson v0.9.1 // indirect - github.com/mattn/go-colorable v0.1.14 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/moby/spdystream v0.5.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/oapi-codegen/runtime v1.1.2 // indirect - github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // 
indirect - github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect - github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/perimeterx/marshmallow v1.1.5 // indirect - github.com/pierrec/lz4/v4 v4.1.22 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.17.0 // indirect - github.com/rs/xid v1.6.0 // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/twmb/franz-go/pkg/kmsg v1.11.2 // indirect - github.com/woodsbury/decimal128 v1.4.0 // indirect - github.com/x448/float16 v0.8.4 // indirect - github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 // indirect - github.com/zmwangx/debounce v1.0.0 // indirect - golang.org/x/crypto v0.41.0 // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.36.0 // indirect - golang.org/x/term v0.34.0 // indirect - golang.org/x/text v0.28.0 // indirect - golang.org/x/time v0.11.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - howett.net/plist v1.0.1 // indirect - k8s.io/api v0.33.1 // indirect - k8s.io/apimachinery v0.33.1 // indirect - k8s.io/client-go v0.33.1 // indirect - k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect - k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect -) diff --git a/go.sum b/go.sum deleted file mode 100644 index b6a9c1c..0000000 --- a/go.sum +++ /dev/null @@ -1,269 +0,0 @@ -github.com/KimMachineGun/automemlimit v0.7.4 h1:UY7QYOIfrr3wjjOAqahFmC3IaQCLWvur9nmfIn6LnWk= -github.com/KimMachineGun/automemlimit v0.7.4/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= -github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= -github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elastic/go-sysinfo v1.15.4 h1:A3zQcunCxik14MgXu39cXFXcIw2sFXZ0zL886eyiv1Q= -github.com/elastic/go-sysinfo v1.15.4/go.mod h1:ZBVXmqS368dOn/jvijV/zHLfakWTYHBZPk3G244lHrU= -github.com/elastic/go-windows v1.0.2 h1:yoLLsAsV5cfg9FLhZ9EXZ2n2sQFKeDYrHenkcivY4vI= 
-github.com/elastic/go-windows v1.0.2/go.mod h1:bGcDpBzXgYSqM0Gx3DM4+UxFj300SZLixie9u9ixLM8= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= -github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ= -github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= -github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= -github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= -github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU= -github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo= -github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= -github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= -github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= -github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= -github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 
h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= -github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/jarcoal/httpmock v1.4.0 h1:BvhqnH0JAYbNudL2GMJKgOHe2CtKlzJ/5rWKyp+hc2k= -github.com/jarcoal/httpmock v1.4.0/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= -github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= -github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/madflojo/testcerts v1.4.0 h1:I09gN0C1ly9IgeVNcAqKk8RAKIJTe3QnFrrPBDyvzN4= -github.com/madflojo/testcerts v1.4.0/go.mod h1:MW8sh39gLnkKh4K0Nc55AyHEDl9l/FBLDUsQhpmkuo0= -github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8= -github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= -github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI= -github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= -github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= -github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= -github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= -github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= -github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= -github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= -github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= -github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= -github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= -github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= -github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= -github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w= -github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= -github.com/steadybit/action-kit/go/action_kit_api/v2 v2.10.0 h1:XIEeNvyXacUn52nONt7rMQDTvWUht+sX2RyrGxzcmts= -github.com/steadybit/action-kit/go/action_kit_api/v2 v2.10.0/go.mod h1:kc36KasHIY867fB7rBJtxg2orPaCU7xbezsAA05qcA8= -github.com/steadybit/action-kit/go/action_kit_sdk v1.3.0 h1:Ql3rJ9m407gZPGZK93jmf7FSTNzir31y2Hi/hP+g0+4= -github.com/steadybit/action-kit/go/action_kit_sdk v1.3.0/go.mod h1:fQm+I4UT7GSr5mzNuJ6MxE/iHl1xwesYXN5NM8l8N1A= -github.com/steadybit/action-kit/go/action_kit_test v1.4.2 h1:jCjnVMvJO1hzYKZgn4ivwzJNSWId2rRNhKvo9uGCKmk= -github.com/steadybit/action-kit/go/action_kit_test v1.4.2/go.mod h1:7vqpIZI0kasNi50I29fe7QybNZ3AaJpbap24j6w8Ujw= -github.com/steadybit/advice-kit/go/advice_kit_api v1.2.1 h1:fVZScd0xVSuGIn/+2BjJ/Uj4tCzY7s2E3AJcd1KfxpY= -github.com/steadybit/advice-kit/go/advice_kit_api v1.2.1/go.mod h1:lN4X8wOehdmDj87QAn2FaJ18tWwpYXieyQ09hBPkPMM= -github.com/steadybit/discovery-kit/go/discovery_kit_api v1.7.0 h1:eT+zVqsb8oriixyDNF5DTuB5L83Z8q6vEAbfN6dbpJA= -github.com/steadybit/discovery-kit/go/discovery_kit_api v1.7.0/go.mod h1:caJZz4j098TZelA84CTUDhT+/0zOti8mqSTMhkYj3yQ= -github.com/steadybit/discovery-kit/go/discovery_kit_commons v0.3.0 h1:D3yk1Izl2tczk3UD60bcmaLnpCzZ03fFVrV27jgnBEQ= -github.com/steadybit/discovery-kit/go/discovery_kit_commons v0.3.0/go.mod h1:vRvPsLTWvsR5Y5V/N/jtzRq03GNFVnbStgN6iKhqCqM= -github.com/steadybit/discovery-kit/go/discovery_kit_sdk v1.3.1 h1:Di/Gv6Wp+E2FBfx2ojf4cBQPJ+lwEk4mclql+lG3bOs= -github.com/steadybit/discovery-kit/go/discovery_kit_sdk v1.3.1/go.mod h1:z7Ma15GaX/UZMTUx/Uulwiz6k+2PwVqou6zP8TKW/Vw= -github.com/steadybit/discovery-kit/go/discovery_kit_test v1.2.0 h1:Pdgl5y0/MkwRqgXVWeHYYlQucTZ6BEAAAZmvgO+jzh4= -github.com/steadybit/discovery-kit/go/discovery_kit_test v1.2.0/go.mod h1:2Ox3Se50dEoG0kSuDhTvlLEWeBvtRmSly/OZCmtDiJk= -github.com/steadybit/event-kit/go/event_kit_api v1.6.0 h1:MZbVVLOPQn46enR3RjWglgWfGR5Eq/18BAex3bH8OQw= -github.com/steadybit/event-kit/go/event_kit_api v1.6.0/go.mod h1:XKfKcyouAOPW3TVr4sT253kJI9rYlzfy5yER1r7p8VU= -github.com/steadybit/extension-kit v1.10.0 h1:sOeMsGwYT+9v+U+tzUxy6xLMEH2h2qVw3uzgTWhnNpk= -github.com/steadybit/extension-kit v1.10.0/go.mod h1:Ow1kVKhBQwLGOrBRPSdGDDVdVBgaMDFjpxPV3FeoHho= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/twmb/franz-go v1.19.5 h1:W7+o8D0RsQsedqib71OVlLeZ0zI6CbFra7yTYhZTs5Y= -github.com/twmb/franz-go v1.19.5/go.mod h1:4kFJ5tmbbl7asgwAGVuyG1ZMx0NNpYk7EqflvWfPCpM= -github.com/twmb/franz-go/pkg/kadm v1.16.1 h1:IEkrhTljgLHJ0/hT/InhXGjPdmWfFvxp7o/MR7vJ8cw= -github.com/twmb/franz-go/pkg/kadm v1.16.1/go.mod h1:Ue/ye1cc9ipsQFg7udFbbGiFNzQMqiH73fGC2y0rwyc= -github.com/twmb/franz-go/pkg/kfake v0.0.0-20250711145744-a849b8be17b7 h1:SmVArSUtiB+bsqMjHtqemjL1YCj4L74NSiOxjtwAJ/o= -github.com/twmb/franz-go/pkg/kfake v0.0.0-20250711145744-a849b8be17b7/go.mod h1:udxwmMC3r4xqjwrSrMi8p9jpqMDNpC2YwexpDSUmQtw= -github.com/twmb/franz-go/pkg/kmsg v1.11.2 h1:hIw75FpwcAjgeyfIGFqivAvwC5uNIOWRGvQgZhH4mhg= -github.com/twmb/franz-go/pkg/kmsg v1.11.2/go.mod h1:CFfkkLysDNmukPYhGzuUcDtf46gQSqCZHMW1T4Z+wDE= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/woodsbury/decimal128 v1.4.0 h1:xJATj7lLu4f2oObouMt2tgGiElE5gO6mSWUjQsBgUlc= -github.com/woodsbury/decimal128 v1.4.0/go.mod h1:BP46FUrVjVhdTbKT+XuQh2xfQaGki9LMIRJSFuh6THU= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= -github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zmwangx/debounce v1.0.0 h1:Dyf+WfLESjc2bqFKHgI1dZTW9oh6CJm8SBDkhXrwLB4= -github.com/zmwangx/debounce v1.0.0/go.mod h1:U+/QHt+bSMdUh8XKOb6U+MQV5Ew4eS8M3ua5WJ7Ns6I= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod 
h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= -golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM= -howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= -k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= -k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= -k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4= -k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 h1:jgJW5IePPXLGB8e/1wvd0Ich9QE97RvvF3a8J3fP/Lg= -k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= -sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/index.yaml b/index.yaml new file mode 100644 index 0000000..27d88c4 --- /dev/null +++ b/index.yaml @@ -0,0 +1,758 @@ +apiVersion: v1 +entries: + steadybit-extension-kafka: + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.2.4 + created: "2025-07-31T11:35:29.685863881Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. 
+ digest: e79018a10ec60b2f269038e6ef04b1ee38d3f6544c69c1bca421052ebf4bf726 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.26/steadybit-extension-kafka-1.0.26.tgz + version: 1.0.26 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.2.3 + created: "2025-07-17T13:24:37.318617761Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 06f991ceccff4b04f1057d34f01dd5f32dd0d709794f2a8002f84d286eb77282 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.25/steadybit-extension-kafka-1.0.25.tgz + version: 1.0.25 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.2.1 + created: "2025-07-17T08:56:36.793476347Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 39cfa27bb71ec8f6759fb399e37c1a708c9b6ab94d07d23c44f58e8e8d5f2f2f + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.24/steadybit-extension-kafka-1.0.24.tgz + version: 1.0.24 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.2.0 + created: "2025-07-16T09:05:21.789330747Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. 
+ digest: 688c2b096f71da5fadf983b6f8bd3da06cdda119f40a1c1f336c5b245b4b93ba + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.23/steadybit-extension-kafka-1.0.23.tgz + version: 1.0.23 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.1.1 + created: "2025-07-15T07:11:37.39462603Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: c543a88df60369fd741a5e70660b62fd19de12a63886ea578eaa7629d1c9b8f4 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.22/steadybit-extension-kafka-1.0.22.tgz + version: 1.0.22 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.1.1 + created: "2025-04-29T12:55:10.680623306Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: ae6d4ee24a7c3c8578c2f5b536e9a6767281be2aaf0e942e26405f699a852227 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.21/steadybit-extension-kafka-1.0.21.tgz + version: 1.0.21 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.1.1 + created: "2025-04-29T12:45:24.365463893Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. 
+ digest: b7557cc4d52891ce1e4031cc4b187343a93d28ae2ed287c3faf716c6dd6a833f + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.20/steadybit-extension-kafka-1.0.20.tgz + version: 1.0.20 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.1.1 + created: "2025-04-29T09:30:57.002446636Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 3bfe271f724c81530330601e12fb921fb00cab3ca0eca650e15ac1067227eb30 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.19/steadybit-extension-kafka-1.0.19.tgz + version: 1.0.19 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.1.0 + created: "2025-04-09T12:38:09.285522751Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: a2e65de8f4e195542989c8153e7397198def941254370f2eb4bd02e2442be5ea + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.18/steadybit-extension-kafka-1.0.18.tgz + version: 1.0.18 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.1.0 + created: "2025-02-04T10:57:00.060676578Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. 
+ digest: 86d2120e011510b8509cf4e30ba9b05f0b1d5804788e84c9d85fc7780c9f9b66 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.17/steadybit-extension-kafka-1.0.17.tgz + version: 1.0.17 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.9 + created: "2025-02-03T13:49:16.657477529Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 17990ed027a09d3c50f6c40690f7eeb60324fa189988312699f19748321bcfa3 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.16/steadybit-extension-kafka-1.0.16.tgz + version: 1.0.16 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.9 + created: "2025-01-31T10:45:11.681229402Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 06b72a24cff412ab40424233faa5c0670db814cfc7f4d90dfcc69494270688fd + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.15/steadybit-extension-kafka-1.0.15.tgz + version: 1.0.15 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.8 + created: "2025-01-31T08:06:54.730394327Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. 
+ digest: 946c11496ccc328f1c1b775423bd32da1b305d0c3baab151c274782bb5f60954 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.14/steadybit-extension-kafka-1.0.14.tgz + version: 1.0.14 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.8 + created: "2025-01-29T16:49:48.222931368Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 8f69d59d96e855890f35977326b7729a5e16d4901bb5161b782eea567ff4479c + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.13/steadybit-extension-kafka-1.0.13.tgz + version: 1.0.13 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.7 + created: "2025-01-29T08:59:49.59048355Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 4a53e51f382895712b7f0b94d0ee4eaef0c944f0cfef962131919a83bbe1fe02 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.12/steadybit-extension-kafka-1.0.12.tgz + version: 1.0.12 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.6 + created: "2025-01-28T14:20:37.338004404Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. 
+ digest: 4b9c51ceaa11d9653cac69dc8631e4cb7f33f932cb2c5828d6662c6ce87f12e2 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.11/steadybit-extension-kafka-1.0.11.tgz + version: 1.0.11 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.7 + created: "2025-01-27T13:36:16.990144107Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: ca02ba90b1e7fd9fc71f130f5a77e0b702a0130026188aa9a37bc8c549409ee8 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.10/steadybit-extension-kafka-1.0.10.tgz + version: 1.0.10 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.5 + created: "2025-01-16T07:46:30.099651511Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: b64ef732e6156be40c5898d511c838d5a7a8292ebf14755e573f282daafba165 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.9/steadybit-extension-kafka-1.0.9.tgz + version: 1.0.9 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.5 + created: "2024-12-06T18:51:27.595942851Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. 
+ digest: c992f5d88153eec29ef7b4a897de71fb61f000bc5ec11ea565a807d46820b746 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.8/steadybit-extension-kafka-1.0.8.tgz + version: 1.0.8 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.4 + created: "2024-12-06T13:55:03.386854613Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 3710770c9bbfe0a9f2e2dcfea1225f0e5bcf787655e40479c7339588b8dc09cc + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.7/steadybit-extension-kafka-1.0.7.tgz + version: 1.0.7 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.4 + created: "2024-11-29T11:51:07.12977138Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: e8ccc5cb8a109edbd59370ca8198226688ffc12178f4e5b1729417864eba3f35 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.6/steadybit-extension-kafka-1.0.6.tgz + version: 1.0.6 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.3 + created: "2024-11-28T08:33:54.048754325Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. 
+ digest: cdea7fdef15ca8acc58dc7e4d3948541b8b4087f20e27ed7ff32ef1ad65a2c77 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.5/steadybit-extension-kafka-1.0.5.tgz + version: 1.0.5 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: v1.0.3 + created: "2024-11-14T15:35:20.218257552Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: bbf0b1f8ccfd25586e79cdfe95da2d87b3661e6fc7bf1de5cd0dc1e45834da73 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.4/steadybit-extension-kafka-1.0.4.tgz + version: 1.0.4 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: 1.0.3 + created: "2024-11-14T15:25:11.447233233Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 817c6cf32a05bf438a1809fa45fad684ace4e918cab66cc15489d7315a2a3023 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.3/steadybit-extension-kafka-1.0.3.tgz + version: 1.0.3 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: 1.0.2 + created: "2024-11-07T14:02:22.985764714Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. 
+ digest: 5c905ceef8516b124d76fd7cc1d30da7d5eaea5edb4694bd27cd554d294f505c + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: antoine@steadybit.com + name: achoimet + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.2/steadybit-extension-kafka-1.0.2.tgz + version: 1.0.2 + - annotations: + artifacthub.io/images: | + - name: logo + image: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + artifacthub.io/links: |- + - name: Steadybit website + url: https://www.steadybit.com + - name: Steadybit reliability hub + url: https://hub.steadybit.com + apiVersion: v2 + appVersion: 1.0.1 + created: "2024-11-07T13:09:27.513015772Z" + dependencies: + - name: extensionlib + repository: https://steadybit.github.io/helm-charts + version: ^1.4.4 + description: Steadybit scaffold extension Helm chart for Kubernetes. + digest: 7a646a2ae7941a9c34db96dddfbf499888f1ffafec5e1dc9c06c694ffee4aee8 + home: https://www.steadybit.com/ + icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png + maintainers: + - email: daniel.reuter@steadybit.com + name: reuda + name: steadybit-extension-kafka + sources: + - https://github.com/steadybit/extension-kafka + urls: + - https://github.com/steadybit/extension-kafka/releases/download/steadybit-extension-kafka-1.0.1/steadybit-extension-kafka-1.0.1.tgz + version: 1.0.1 +generated: "2025-07-31T11:35:29.686129241Z" diff --git a/linuxpkg/config/logrotate.d/steadybit-extension-kafka b/linuxpkg/config/logrotate.d/steadybit-extension-kafka deleted file mode 100644 index 7aaf59a..0000000 --- a/linuxpkg/config/logrotate.d/steadybit-extension-kafka +++ /dev/null @@ -1,8 +0,0 @@ -/var/log/steadybit-extension-kafka.log { - copytruncate - size 10m - create 700 steadybit steadybit - dateext - rotate 5 - compress -} diff --git a/linuxpkg/config/steadybit/extension-kafka b/linuxpkg/config/steadybit/extension-kafka deleted file mode 100644 index 0a7a908..0000000 --- a/linuxpkg/config/steadybit/extension-kafka +++ /dev/null @@ -1,12 +0,0 @@ -STEADYBIT_LOG_LEVEL=info -STEADYBIT_LOG_FORMAT=text -STEADYBIT_EXTENSION_UNIX_SOCKET=/run/steadybit/extension-kafka.sock -# -# TODO: -# If you have a kafka auth mechanism, please provide the environment variables. -# For more information visit https://github.com/steadybit/extension-kafka -# -STEADYBIT_EXTENSION_SEED_BROKERS= -STEADYBIT_EXTENSION_SASL_MECHANISM= -STEADYBIT_EXTENSION_SASL_USER= -STEADYBIT_EXTENSION_SASL_PASSWORD= diff --git a/linuxpkg/config/steadybit/extensions.d/extension-kafka.yaml b/linuxpkg/config/steadybit/extensions.d/extension-kafka.yaml deleted file mode 100644 index 53331a2..0000000 --- a/linuxpkg/config/steadybit/extensions.d/extension-kafka.yaml +++ /dev/null @@ -1,4 +0,0 @@ -unixSocket: /run/steadybit/extension-kafka.sock -types: - - ACTION - - DISCOVERY diff --git a/linuxpkg/init.d/steadybit-extension-kafka b/linuxpkg/init.d/steadybit-extension-kafka deleted file mode 100755 index 22f48d5..0000000 --- a/linuxpkg/init.d/steadybit-extension-kafka +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/sh -# -# Copyright 2024 steadybit GmbH. All rights reserved. 
-# - -### BEGIN INIT INFO -# Provides: steadybit-extension-kafka -# Required-Start: $local_fs $network $named $time $syslog -# Required-Stop: $local_fs $network $named $time $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Description: Steadybit Extension Kafka -# chkconfig: 2345 99 01 -### END INIT INFO - -SCRIPT=/opt/steadybit/extension-kafka/extension-kafka -RUNAS=steadybit - -PIDFILE=/var/run/steadybit-extension-kafka.pid -LOGFILE=/var/log/steadybit-extension-kafka.log -ENVFILE=/etc/steadybit/extension-kafka - -start() { - if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")"; then - echo 'Service already running' >&2 - return 1 - fi - echo 'Starting service...' >&2 - - if [ ! -e "$LOGFILE" ]; then - touch "$LOGFILE" - if [ -n "$RUNAS" ]; then - chown "$RUNAS" "$LOGFILE" - fi - fi - - if [ -f "$ENVFILE" ]; then - export $(grep -v "^#" "$ENVFILE" | xargs) - fi - - su -s /bin/sh -c "$SCRIPT > \"$LOGFILE\" 2>&1 & echo \$!" $RUNAS >"$PIDFILE" - PID=$(cat "$PIDFILE") - sleep 1 - - if [ -z "$PID" ] || ! kill -0 "$PID" 2>/dev/null; then - echo "Service failed to start" >&2 - tail -n 10 "$LOGFILE" - return 1 - fi - echo 'Service started' >&2 -} - -stop() { - if [ ! -f "$PIDFILE" ] || ! kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then - echo 'Service not running' >&2 - return 1 - fi - echo 'Stopping service...' >&2 - kill -15 "$(cat "$PIDFILE")" && rm -f "$PIDFILE" - echo 'Service stopped' >&2 -} - -status() { - if [ ! -f "$PIDFILE" ]; then - echo 'Service not running'. >&2 - return 3 - fi - PID=$(cat "$PIDFILE") - if ! kill -0 "$PID" 2>/dev/null; then - echo "Service not running: process $PID not found." >&2 - return 1 - fi - - echo 'Service running'. >&2 - return 0 -} - -case "$1" in -start) - start - ;; -status) - status - ;; -stop) - stop - ;; -force-reload) - stop - start - ;; -restart) - stop - start - ;; -*) - echo "Usage: $0 {start|stop|status|restart}" - ;; -esac diff --git a/linuxpkg/scripts/postinstall.sh b/linuxpkg/scripts/postinstall.sh deleted file mode 100755 index 5242bae..0000000 --- a/linuxpkg/scripts/postinstall.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/sh -e - -# -# Copyright 2024 steadybit GmbH. All rights reserved. -# -service_name="steadybit-extension-kafka" -env_file="/etc/steadybit/extension-kafka" - -# decide if we should use SystemD or init/upstart -use_systemctl="True" -if ! 
command -V systemctl >/dev/null 2>&1; then - use_systemctl="False" -fi - -cleanup() { - # remove files that were not needed on this platform / system - if [ "${use_systemctl}" = "False" ]; then - rm -f "/usr/lib/systemd/system/$service_name.service" - else - rm -f "/etc/chkconfig/$service_name" - rm -f "/etc/init.d/$service_name" - fi -} - -cleanInstall() { - if [ -n "$STEADYBIT_LOG_LEVEL" ]; then - sed -i "s/^STEADYBIT_LOG_LEVEL=.*/STEADYBIT_LOG_LEVEL=$(echo "$STEADYBIT_LOG_LEVEL" | sed 's,/,\\/,g')/" "$env_file" - fi - - # enable the service in the proper way for this platform - if [ "${use_systemctl}" = "False" ]; then - if command -V chkconfig >/dev/null 2>&1; then - chkconfig --add "$service_name" - fi - - service "$service_name" restart || : - else - systemctl daemon-reload || : - systemctl unmask "$service_name" || : - systemctl preset "$service_name" || : - systemctl enable "$service_name" || : - systemctl restart "$service_name" || : - fi - -} - -upgrade() { - if [ "${use_systemctl}" = "False" ]; then - if service "$service_name" status 2>/dev/null; then - service "$service_name" restart || : - fi - else - systemctl daemon-reload - if systemctl is-active --quiet "$service_name"; then - systemctl restart "$service_name" || : - else - systemctl start "$service_name" || : - fi - fi -} - -#check if this is a clean install or an upgrade -action="$1" -if [ "$1" = "configure" ] && [ -z "$2" ]; then - # Alpine linux does not pass args, and deb passes $1=configure - action="install" -elif [ "$1" = "configure" ] && [ -n "$2" ]; then - # deb passes $1=configure $2= - action="upgrade" -fi - -case "$action" in -"1" | "install") - cleanInstall - ;; -"2" | "upgrade") - upgrade - ;; -*) - # $1 == version being installed on Alpine - cleanInstall - ;; -esac - -cleanup diff --git a/linuxpkg/scripts/postremove.sh b/linuxpkg/scripts/postremove.sh deleted file mode 100755 index 1210e57..0000000 --- a/linuxpkg/scripts/postremove.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh -e - -# -# Copyright 2024 steadybit GmbH. All rights reserved. -# - -service_name="steadybit-extension-kafka" -# decide if we should use SystemD or init/upstart -use_systemctl="True" -if ! command -V systemctl >/dev/null 2>&1; then - use_systemctl="False" -fi - -remove() { - if [ "${use_systemctl}" = "True" ]; then - systemctl mask "$service_name" || : - fi -} - -purge() { - if [ "${use_systemctl}" = "True" ]; then - if systemctl is-enabled --quiet "$service_name"; then - systemctl disable "$service_name" || : - fi - systemctl unmask "$service_name" || : - fi -} - -upgrade() { - : -} - -action="$1" - -case "$action" in -"0" | "purge") - purge - ;; -"remove") - remove - ;; -"1" | "upgrade") - upgrade - ;; -*) - # $1 == version being installed on Alpine - remove - ;; -esac diff --git a/linuxpkg/scripts/preinstall.sh b/linuxpkg/scripts/preinstall.sh deleted file mode 100755 index a1fd30a..0000000 --- a/linuxpkg/scripts/preinstall.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -e - -# -# Copyright 2024 steadybit GmbH. All rights reserved. -# - -if ! getent passwd steadybit >/dev/null 2>&1; then - useradd --system steadybit - printf "created user: steadybit\n" -fi - -if getent group docker >/dev/null 2>&1; then - gpasswd -a steadybit docker -fi diff --git a/linuxpkg/scripts/preremove.sh b/linuxpkg/scripts/preremove.sh deleted file mode 100755 index 6c6f152..0000000 --- a/linuxpkg/scripts/preremove.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh -e - -# -# Copyright 2024 steadybit GmbH. All rights reserved. 
-# - -service_name="steadybit-extension-kafka" -# decide if we should use SystemD or init/upstart -use_systemctl="True" -if ! command -V systemctl >/dev/null 2>&1; then - use_systemctl="False" -fi - -remove() { - if [ "${use_systemctl}" = "False" ]; then - if service "$service_name" status 2>/dev/null; then - service "$service_name" stop - fi - else - if systemctl is-active --quiet "$service_name"; then - systemctl stop "$service_name" - fi - if systemctl is-enabled --quiet "$service_name"; then - systemctl disable "$service_name" - fi - fi -} - -upgrade() { - : -} - -action="$1" - -case "$action" in -"0" | "remove") - remove - ;; -"1" | "upgrade") - upgrade - ;; -*) - # $1 == version being installed on Alpine - remove - ;; -esac diff --git a/linuxpkg/systemd/steadybit-extension-kafka.service b/linuxpkg/systemd/steadybit-extension-kafka.service deleted file mode 100644 index d96cd32..0000000 --- a/linuxpkg/systemd/steadybit-extension-kafka.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description="steadybit extension kafka" -After=syslog.target - -[Service] -Type=simple -ExecStart=/opt/steadybit/extension-kafka/extension-kafka -EnvironmentFile=/etc/steadybit/extension-kafka -User=steadybit -Group=steadybit -SuccessExitStatus=0 143 -Restart=on-failure -RestartSec=5s -StandardOutput=append:/var/log/steadybit-extension-kafka.log -StandardError=append:/var/log/steadybit-extension-kafka.log - -[Install] -WantedBy=multi-user.target diff --git a/main.go b/main.go deleted file mode 100644 index 488a0ad..0000000 --- a/main.go +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Copyright 2024 steadybit GmbH. All rights reserved. - */ - -package main - -import ( - "context" - "crypto/tls" - "crypto/x509" - _ "github.com/KimMachineGun/automemlimit" // By default, it sets `GOMEMLIMIT` to 90% of cgroup's memory limit. - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - "github.com/steadybit/action-kit/go/action_kit_api/v2" - "github.com/steadybit/action-kit/go/action_kit_sdk" - "github.com/steadybit/advice-kit/go/advice_kit_api" - "github.com/steadybit/discovery-kit/go/discovery_kit_api" - "github.com/steadybit/discovery-kit/go/discovery_kit_sdk" - "github.com/steadybit/event-kit/go/event_kit_api" - "github.com/steadybit/extension-kafka/config" - "github.com/steadybit/extension-kafka/extkafka" - "github.com/steadybit/extension-kit/extbuild" - "github.com/steadybit/extension-kit/exthealth" - "github.com/steadybit/extension-kit/exthttp" - "github.com/steadybit/extension-kit/extlogging" - "github.com/steadybit/extension-kit/extruntime" - "github.com/steadybit/extension-kit/extsignals" - "github.com/twmb/franz-go/pkg/kadm" - "github.com/twmb/franz-go/pkg/kgo" - "github.com/twmb/franz-go/pkg/sasl/plain" - "github.com/twmb/franz-go/pkg/sasl/scram" - _ "go.uber.org/automaxprocs" // Importing automaxprocs automatically adjusts GOMAXPROCS. - "net" - "os" - "os/signal" - "strings" - "syscall" - "time" -) - -func main() { - // Most Steadybit extensions leverage zerolog. To encourage persistent logging setups across extensions, - // you may leverage the extlogging package to initialize zerolog. Among others, this package supports - // configuration of active log levels and the log format (JSON or plain text). - // - // Example - // - to activate JSON logging, set the environment variable STEADYBIT_LOG_FORMAT="json" - // - to set the log level to debug, set the environment variable STEADYBIT_LOG_LEVEL="debug" - extlogging.InitZeroLog() - - // Build information is set at compile-time. 
This line writes the build information to the log. - // The information is mostly handy for debugging purposes. - extbuild.PrintBuildInformation() - extruntime.LogRuntimeInformation(zerolog.DebugLevel) - - // Most extensions require some form of configuration. These calls exist to parse and validate the - // configuration obtained from environment variables. - config.ParseConfiguration() - config.ValidateConfiguration() - testBrokerConnection() - - //This will start /health/liveness and /health/readiness endpoints on port 8081 for use with kubernetes - //The port can be configured using the STEADYBIT_EXTENSION_HEALTH_PORT environment variable - exthealth.SetReady(false) - exthealth.StartProbes(8084) - - ctx, cancel := SignalCanceledContext() - - registerHandlers(ctx) - - extsignals.AddSignalHandler(extsignals.SignalHandler{ - Handler: func(signal os.Signal) { - cancel() - }, - Order: extsignals.OrderStopCustom, - Name: "custom-extension-kafka", - }) - extsignals.ActivateSignalHandlers() - - //This will register the coverage endpoints for the extension (used by action_kit_test) - action_kit_sdk.RegisterCoverageEndpoints() - - //This will switch the readiness state of the application to true. - exthealth.SetReady(true) - - exthttp.Listen(exthttp.ListenOpts{ - // This is the default port under which your extension is accessible. - // The port can be configured externally through the - // STEADYBIT_EXTENSION_PORT environment variable. - // We suggest that you keep port 8080 as the default. - Port: 8083, - }) -} - -// ExtensionListResponse exists to merge the possible root path responses supported by the -// various extension kits. In this case, the response for ActionKit, DiscoveryKit and EventKit. -type ExtensionListResponse struct { - action_kit_api.ActionList `json:",inline"` - discovery_kit_api.DiscoveryList `json:",inline"` - event_kit_api.EventListenerList `json:",inline"` - advice_kit_api.AdviceList `json:",inline"` -} - -func registerHandlers(ctx context.Context) { - discovery_kit_sdk.Register(extkafka.NewKafkaBrokerDiscovery(ctx)) - discovery_kit_sdk.Register(extkafka.NewKafkaTopicDiscovery(ctx)) - discovery_kit_sdk.Register(extkafka.NewKafkaConsumerGroupDiscovery(ctx)) - action_kit_sdk.RegisterAction(extkafka.NewProduceMessageActionPeriodically()) - action_kit_sdk.RegisterAction(extkafka.NewProduceMessageActionFixedAmount()) - action_kit_sdk.RegisterAction(extkafka.NewConsumerGroupCheckAction()) - action_kit_sdk.RegisterAction(extkafka.NewConsumerGroupLagCheckAction()) - action_kit_sdk.RegisterAction(extkafka.NewKafkaBrokerElectNewLeaderAttack()) - action_kit_sdk.RegisterAction(extkafka.NewDeleteRecordsAttack()) - action_kit_sdk.RegisterAction(extkafka.NewAlterMaxMessageBytesAttack()) - action_kit_sdk.RegisterAction(extkafka.NewAlterNumberIOThreadsAttack()) - action_kit_sdk.RegisterAction(extkafka.NewAlterNumberNetworkThreadsAttack()) - action_kit_sdk.RegisterAction(extkafka.NewAlterLimitConnectionCreateRateAttack()) - action_kit_sdk.RegisterAction(extkafka.NewKafkaConsumerDenyAccessAttack()) - action_kit_sdk.RegisterAction(extkafka.NewPartitionsCheckAction()) - action_kit_sdk.RegisterAction(extkafka.NewBrokersCheckAction()) - - exthttp.RegisterHttpHandler("/", exthttp.GetterAsHandler(getExtensionList)) -} - -func SignalCanceledContext() (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGUSR1) - - go func() { - select { - case <-c: - cancel() - 
case <-ctx.Done(): - } - }() - - return ctx, func() { - signal.Stop(c) - cancel() - } -} - -func getExtensionList() ExtensionListResponse { - return ExtensionListResponse{ - // See this document to learn more about the action list: - // https://github.com/steadybit/action-kit/blob/main/docs/action-api.md#action-list - ActionList: action_kit_sdk.GetActionList(), - - // See this document to learn more about the discovery list: - // https://github.com/steadybit/discovery-kit/blob/main/docs/discovery-api.md#index-response - DiscoveryList: discovery_kit_sdk.GetDiscoveryList(), - } -} - -func testBrokerConnection() { - opts := []kgo.Opt{ - kgo.SeedBrokers(strings.Split(config.Config.SeedBrokers, ",")...), - kgo.DefaultProduceTopic("steadybit"), - kgo.ClientID("steadybit"), - } - - if config.Config.SaslMechanism != "" { - switch saslMechanism := config.Config.SaslMechanism; saslMechanism { - case kadm.ScramSha256.String(): - opts = append(opts, []kgo.Opt{ - kgo.SASL(scram.Auth{ - User: config.Config.SaslUser, - Pass: config.Config.SaslPassword, - }.AsSha256Mechanism()), - }...) - case kadm.ScramSha512.String(): - opts = append(opts, []kgo.Opt{ - kgo.SASL(scram.Auth{ - User: config.Config.SaslUser, - Pass: config.Config.SaslPassword, - }.AsSha512Mechanism()), - }...) - default: - opts = append(opts, []kgo.Opt{ - kgo.SASL(plain.Auth{ - User: config.Config.SaslUser, - Pass: config.Config.SaslPassword, - }.AsMechanism()), - }...) - } - } - - if config.Config.KafkaClusterCaFile != "" && config.Config.KafkaClusterCertKeyFile != "" && config.Config.KafkaClusterCertChainFile != "" { - tlsConfig, err := newTLSConfig(config.Config.KafkaClusterCertChainFile, config.Config.KafkaClusterCertKeyFile, config.Config.KafkaClusterCaFile) - if err != nil { - log.Fatal().Err(err).Msgf("failed to create tls config: %s", err.Error()) - } - - opts = append(opts, kgo.DialTLSConfig(tlsConfig)) - } else if config.Config.KafkaConnectionUseTLS == "true" { - tlsDialer := &tls.Dialer{NetDialer: &net.Dialer{Timeout: 10 * time.Second}} - opts = append(opts, kgo.Dialer(tlsDialer.DialContext)) - } - - client, err := kgo.NewClient(opts...) - if err != nil { - log.Fatal().Err(err).Msgf("failed to initialize kafka client: %s", err.Error()) - } - defer client.Close() - - err = client.Ping(context.Background()) - if err != nil { - log.Fatal().Err(err).Msgf("Failed to reach brokers: %s", err.Error()) - } - log.Info().Msg("Successfully reached the brokers.") - //initTestData(client) -} - -//func initTestData(client *kgo.Client) { -// admin := kadm.NewClient(client) -// defer admin.Close() -// -// //Step 3: Define topic parameters -// topicName := "steadybit" -// -// // Step 4: Create the topic using the Admin client -// ctx := context.Background() -// result, err := admin.CreateTopics(ctx, -1, -1, nil, topicName) -// if err != nil { -// log.Fatal().Err(err).Msgf("Failed to create topic: %v", err) -// } -// -// // Step 5: Check the result of the topic creation -// for _, res := range result { -// if res.Err != nil { -// fmt.Printf("Failed to create topic %s: %v\n", res.Topic, res.Err) -// } else { -// fmt.Printf("Topic %s created successfully\n", res.Topic) -// } -// } -// -// // Create ACL for dummy client -// acl := kadm.NewACLs(). -// ResourcePatternType(kadm.ACLPatternLiteral). -// Topics("*"). -// Groups("dummy"). -// Operations(kadm.OpRead, kadm.OpWrite, kadm.OpDescribe). 
-// Allow("User:consumer") -// _, err = admin.CreateACLs(ctx, acl) -// if err != nil { -// return -// } -//} - -func newTLSConfig(clientCertFile, clientKeyFile, caCertFile string) (*tls.Config, error) { - tlsConfig := tls.Config{} - - // Load client cert - cert, err := tls.LoadX509KeyPair(clientCertFile, clientKeyFile) - if err != nil { - return &tlsConfig, err - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // Load CA cert - caCert, err := os.ReadFile(caCertFile) - if err != nil { - return &tlsConfig, err - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = caCertPool - - return &tlsConfig, err -} diff --git a/sonar-project.properties b/sonar-project.properties deleted file mode 100644 index e3c334e..0000000 --- a/sonar-project.properties +++ /dev/null @@ -1,9 +0,0 @@ -sonar.projectKey=steadybit_extension-kafka -sonar.organization=steadybit - -sonar.sources=. -sonar.exclusions=**/*_test.go -sonar.tests=. -sonar.test.inclusions=**/*_test.go - -sonar.go.coverage.reportPaths=coverage.out,e2e/e2e-coverage-docker.out diff --git a/test-dataset/dummyconsumer/Dockerfile b/test-dataset/dummyconsumer/Dockerfile deleted file mode 100644 index b38de21..0000000 --- a/test-dataset/dummyconsumer/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM golang:1.24 AS builder - -WORKDIR /app -COPY . . -RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o myapp - -FROM alpine:3.14 - -ARG USERNAME=steadybit -ARG USER_UID=10000 - -RUN adduser -u $USER_UID -D $USERNAME - -USER $USER_UID - -WORKDIR /app -COPY --from=builder /app/myapp /app/ - -CMD ["/app/myapp"] diff --git a/test-dataset/dummyconsumer/go.mod b/test-dataset/dummyconsumer/go.mod deleted file mode 100644 index f97e194..0000000 --- a/test-dataset/dummyconsumer/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module github.com/steadybit/extension-kafka/dummyconsummer - -go 1.24 - -require ( - github.com/rs/zerolog v1.34.0 - github.com/twmb/franz-go v1.19.5 -) - -require ( - github.com/klauspost/compress v1.18.0 // indirect - github.com/mattn/go-colorable v0.1.14 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/pierrec/lz4/v4 v4.1.22 // indirect - github.com/twmb/franz-go/pkg/kmsg v1.11.2 // indirect - golang.org/x/crypto v0.41.0 // indirect - golang.org/x/sys v0.35.0 // indirect -) diff --git a/test-dataset/dummyconsumer/go.sum b/test-dataset/dummyconsumer/go.sum deleted file mode 100644 index f608c93..0000000 --- a/test-dataset/dummyconsumer/go.sum +++ /dev/null @@ -1,28 +0,0 @@ -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 
-github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
-github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
-github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
-github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
-github.com/twmb/franz-go v1.19.5 h1:W7+o8D0RsQsedqib71OVlLeZ0zI6CbFra7yTYhZTs5Y=
-github.com/twmb/franz-go v1.19.5/go.mod h1:4kFJ5tmbbl7asgwAGVuyG1ZMx0NNpYk7EqflvWfPCpM=
-github.com/twmb/franz-go/pkg/kmsg v1.11.2 h1:hIw75FpwcAjgeyfIGFqivAvwC5uNIOWRGvQgZhH4mhg=
-github.com/twmb/franz-go/pkg/kmsg v1.11.2/go.mod h1:CFfkkLysDNmukPYhGzuUcDtf46gQSqCZHMW1T4Z+wDE=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
diff --git a/test-dataset/dummyconsumer/main.go b/test-dataset/dummyconsumer/main.go
deleted file mode 100644
index 9ecaa5a..0000000
--- a/test-dataset/dummyconsumer/main.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-	"github.com/rs/zerolog/log"
-	"github.com/twmb/franz-go/pkg/kgo"
-	"github.com/twmb/franz-go/pkg/sasl/scram"
-	"os"
-	"strings"
-)
-
-func main() {
-	// Configure
-	seeds, _ := os.LookupEnv("STEADYBIT_DUMMY_SEED_BROKERS")
-	if seeds == "" {
-		seeds = "kafka-demo.steadybit-demo.svc.cluster.local:9092"
-	}
-
-	saslUser, _ := os.LookupEnv("STEADYBIT_DUMMY_SASL_USER")
-	if saslUser == "" {
-		saslUser = "user1"
-	}
-
-	saslPassword, _ := os.LookupEnv("STEADYBIT_DUMMY_SASL_PASSWORD")
-	if saslPassword == "" {
-		saslPassword = "steadybit"
-	}
-
-	topic, _ := os.LookupEnv("STEADYBIT_DUMMY_TOPIC")
-	if topic == "" {
-		topic = "steadybit-demo"
-	}
-
-	consumer, _ := os.LookupEnv("STEADYBIT_DUMMY_CONSUMER_NAME")
-	if consumer == "" {
-		consumer = "steadybit-demo-consumer"
-	}
-
-	// One client can both produce and consume!
-	// Consuming can either be direct (no consumer group), or through a group. Below, we use a group.
-	cl, err := kgo.NewClient(
-		kgo.SeedBrokers(strings.Split(seeds, ",")...),
-		kgo.ConsumerGroup(consumer),
-		kgo.ConsumeTopics(topic),
-		kgo.SASL(scram.Auth{
-			User: saslUser,
-			Pass: saslPassword,
-		}.AsSha512Mechanism()),
-	)
-	log.Info().Msgf("Initiating consumer with kafka config: brokers %s, consumer name %s on topic %s with user %s", seeds, consumer, topic, saslUser)
-	if err != nil {
-		panic(err)
-	}
-	defer cl.Close()
-
-	ctx := context.Background()
-
-	// 2.) Consuming messages from a topic
-	for {
-		fetches := cl.PollFetches(ctx)
-		if errs := fetches.Errors(); len(errs) > 0 {
-			// All errors are retried internally when fetching, but non-retriable errors are
-			// returned from polls so that users can notice and take action.
-			log.Info().Msg(fmt.Sprint(errs))
-		}
-
-		// We can iterate through a record iterator...
-		iter := fetches.RecordIter()
-		for !iter.Done() {
-			record := iter.Next()
-			fmt.Printf("%s from an iterator! Offset: %d\n", record.Value, record.Offset)
-		}
-
-	}
-
-}
diff --git a/test-dataset/kafka-docker-compose/client-properties/adminclient.properties b/test-dataset/kafka-docker-compose/client-properties/adminclient.properties
deleted file mode 100644
index 65d0084..0000000
--- a/test-dataset/kafka-docker-compose/client-properties/adminclient.properties
+++ /dev/null
@@ -1,3 +0,0 @@
-security.protocol=SASL_PLAINTEXT
-sasl.mechanism=PLAIN
-sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";
diff --git a/test-dataset/kafka-docker-compose/client-properties/consumer.properties b/test-dataset/kafka-docker-compose/client-properties/consumer.properties
deleted file mode 100644
index 5f284ce..0000000
--- a/test-dataset/kafka-docker-compose/client-properties/consumer.properties
+++ /dev/null
@@ -1,3 +0,0 @@
-security.protocol=SASL_PLAINTEXT
-sasl.mechanism=PLAIN
-sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="consumer" password="consumer-secret";
diff --git a/test-dataset/kafka-docker-compose/client-properties/producer.properties b/test-dataset/kafka-docker-compose/client-properties/producer.properties
deleted file mode 100644
index fe38214..0000000
--- a/test-dataset/kafka-docker-compose/client-properties/producer.properties
+++ /dev/null
@@ -1,3 +0,0 @@
-security.protocol=SASL_PLAINTEXT
-sasl.mechanism=PLAIN
-sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="producer" password="producer-secret";
diff --git a/test-dataset/kafka-docker-compose/docker-compose.yml b/test-dataset/kafka-docker-compose/docker-compose.yml
deleted file mode 100644
index af37957..0000000
--- a/test-dataset/kafka-docker-compose/docker-compose.yml
+++ /dev/null
@@ -1,160 +0,0 @@
-# Basic authorization with SASL (Plaintext to Kafka and DigestMD5 to Zookeeper)
----
-version: '2'
-services:
-  zookeeper:
-    image: confluentinc/cp-zookeeper:5.4.1
-    ports:
-      - '31000:31000'
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-      KAFKA_OPTS: "-Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Djava.security.auth.login.config=/opt/security/zookeeper-server.jaas"
-      KAFKA_JMX_HOSTNAME: "localhost"
-      KAFKA_JMX_PORT: 31000
-    volumes:
-      - ./security:/opt/security
-
-  kafka:
-    hostname: kafka
-    image: confluentinc/cp-server:5.4.1
-    ports:
-      - '9092:9092'
-      - '9093:9093'
-      - '31001:31001'
-    depends_on:
-      - zookeeper
-    environment:
-      KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
-      KAFKA_INTER_BROKER_LISTENER_NAME: EXTERNAL
-      KAFKA_LISTENERS: "EXTERNAL://kafka:9092,INTERNAL://localhost:9093"
-      KAFKA_ADVERTISED_LISTENERS: "EXTERNAL://kafka:9092,INTERNAL://localhost:9093"
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "EXTERNAL:SASL_PLAINTEXT,INTERNAL:SASL_PLAINTEXT"
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
-# KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
-      KAFKA_LISTENER_NAME_EXTERNAL_SASL_ENABLED_MECHANISMS: PLAIN
-      KAFKA_LISTENER_NAME_INTERNAL_SASL_ENABLED_MECHANISMS: PLAIN
-# KAFKA_LISTENER_NAME_EXTERNAL_PLAIN_SASL_JAAS_CONFIG: "KafkaServer {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret' user_admin='admin-secret';} KafkaClient {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret';};"
-# KAFKA_LISTENER_NAME_INTERNAL_PLAIN_SASL_JAAS_CONFIG: "KafkaServer {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret' user_admin='admin-secret';} KafkaClient {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret';};"
-# KAFKA_ZOOKEEPER_SASL_CLIENTCONFIG: "org.apache.zookeeper.server.auth.DigestLoginModule required username='admin' password='admin-secret';};"
-
-      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
-      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
-# KAFKA_AUTHORIZER_CLASS_NAME: io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer
-# KAFKA_CONFLUENT_AUTHORIZER_ACCESS_RULE_PROVIDERS: "ZK_ACL,CONFLUENT"
-      KAFKA_SUPER_USERS: "User:admin"
-      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
-      KAFKA_ZOOKEEPER_SET_ACL: "true"
-      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/security/kafka-server.jaas"
-      KAFKA_JMX_HOSTNAME: "localhost"
-      KAFKA_JMX_PORT: 31001
-      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
-      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: localhost:9093
-      CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
-      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
-      CONFLUENT_METRICS_REPORTER_SECURITY_PROTOCOL: SASL_PLAINTEXT
-      CONFLUENT_METRICS_REPORTER_SASL_MECHANISM: PLAIN
-      CONFLUENT_METRICS_REPORTER_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret';"
-
-      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
-    volumes:
-      - ./security:/opt/security
-
-  kafka2:
-    hostname: kafka
-    image: confluentinc/cp-server:5.4.1
-    ports:
-      - '9096:9096'
-      - '9097:9097'
-      - '31002:31002'
-    depends_on:
-      - zookeeper
-    environment:
-      KAFKA_BROKER_ID: 2
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
-      KAFKA_INTER_BROKER_LISTENER_NAME: EXTERNAL
-      KAFKA_LISTENERS: "EXTERNAL://kafka2:9096,INTERNAL://localhost:9097"
-      KAFKA_ADVERTISED_LISTENERS: "EXTERNAL://kafka2:9096,INTERNAL://localhost:9097"
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "EXTERNAL:SASL_PLAINTEXT,INTERNAL:SASL_PLAINTEXT"
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
-      # KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
-      KAFKA_LISTENER_NAME_EXTERNAL_SASL_ENABLED_MECHANISMS: PLAIN
-      KAFKA_LISTENER_NAME_INTERNAL_SASL_ENABLED_MECHANISMS: PLAIN
-      # KAFKA_LISTENER_NAME_EXTERNAL_PLAIN_SASL_JAAS_CONFIG: "KafkaServer {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret' user_admin='admin-secret';} KafkaClient {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret';};"
-      # KAFKA_LISTENER_NAME_INTERNAL_PLAIN_SASL_JAAS_CONFIG: "KafkaServer {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret' user_admin='admin-secret';} KafkaClient {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret';};"
-      # KAFKA_ZOOKEEPER_SASL_CLIENTCONFIG: "org.apache.zookeeper.server.auth.DigestLoginModule required username='admin' password='admin-secret';};"
-
-      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
-      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
-      # KAFKA_AUTHORIZER_CLASS_NAME: io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer
-      # KAFKA_CONFLUENT_AUTHORIZER_ACCESS_RULE_PROVIDERS: "ZK_ACL,CONFLUENT"
-      KAFKA_SUPER_USERS: "User:admin"
-      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
-      KAFKA_ZOOKEEPER_SET_ACL: "true"
-      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/security/kafka-server.jaas"
-      KAFKA_JMX_HOSTNAME: "localhost"
-      KAFKA_JMX_PORT: 31002
-      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
-      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: localhost:9097
-      CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
-      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
-      CONFLUENT_METRICS_REPORTER_SECURITY_PROTOCOL: SASL_PLAINTEXT
-      CONFLUENT_METRICS_REPORTER_SASL_MECHANISM: PLAIN
-      CONFLUENT_METRICS_REPORTER_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret';"
-
-      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
-    volumes:
-      - ./security:/opt/security
-
-  kafka3:
-    hostname: kafka3
-    image: confluentinc/cp-server:5.4.1
-    ports:
-      - '9094:9094'
-      - '9095:9095'
-      - '31003:31003'
-    depends_on:
-      - zookeeper
-    environment:
-      KAFKA_BROKER_ID: 3
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
-      KAFKA_INTER_BROKER_LISTENER_NAME: EXTERNAL
-      KAFKA_LISTENERS: "EXTERNAL://kafka3:9094,INTERNAL://localhost:9095"
-      KAFKA_ADVERTISED_LISTENERS: "EXTERNAL://kafka3:9094,INTERNAL://localhost:9095"
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "EXTERNAL:SASL_PLAINTEXT,INTERNAL:SASL_PLAINTEXT"
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
-      # KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
-      KAFKA_LISTENER_NAME_EXTERNAL_SASL_ENABLED_MECHANISMS: PLAIN
-      KAFKA_LISTENER_NAME_INTERNAL_SASL_ENABLED_MECHANISMS: PLAIN
-      # KAFKA_LISTENER_NAME_EXTERNAL_PLAIN_SASL_JAAS_CONFIG: "KafkaServer {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret' user_admin='admin-secret';} KafkaClient {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret';};"
-      # KAFKA_LISTENER_NAME_INTERNAL_PLAIN_SASL_JAAS_CONFIG: "KafkaServer {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret' user_admin='admin-secret';} KafkaClient {org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret';};"
-      # KAFKA_ZOOKEEPER_SASL_CLIENTCONFIG: "org.apache.zookeeper.server.auth.DigestLoginModule required username='admin' password='admin-secret';};"
-
-      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
-      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
-      # KAFKA_AUTHORIZER_CLASS_NAME: io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer
-      # KAFKA_CONFLUENT_AUTHORIZER_ACCESS_RULE_PROVIDERS: "ZK_ACL,CONFLUENT"
-      KAFKA_SUPER_USERS: "User:admin"
-      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
-      KAFKA_ZOOKEEPER_SET_ACL: "true"
-      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/security/kafka-server.jaas"
-      KAFKA_JMX_HOSTNAME: "localhost"
-      KAFKA_JMX_PORT: 31003
-      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
-      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: localhost:9095
-      CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
-      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
-      CONFLUENT_METRICS_REPORTER_SECURITY_PROTOCOL: SASL_PLAINTEXT
-      CONFLUENT_METRICS_REPORTER_SASL_MECHANISM: PLAIN
-      CONFLUENT_METRICS_REPORTER_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='admin-secret';"
-
-      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
-    volumes:
-      - ./security:/opt/security
diff --git a/test-dataset/kafka-docker-compose/security/kafka-server.jaas b/test-dataset/kafka-docker-compose/security/kafka-server.jaas
deleted file mode 100644
index 878ed2f..0000000
--- a/test-dataset/kafka-docker-compose/security/kafka-server.jaas
+++ /dev/null
@@ -1,24 +0,0 @@
-// Server config - used to authorise
-KafkaServer {
-    org.apache.kafka.common.security.plain.PlainLoginModule required
-    username="admin"
-    password="admin-secret"
-    user_admin="admin-secret"
-    user_producer="producer-secret"
-    user_consumer="consumer-secret";
-};
-// Client config used to connect to Kafka
-KafkaClient {
-    org.apache.kafka.common.security.plain.PlainLoginModule required
-    username="admin"
-    password="admin-secret";
-};
-
-// Client config user to connect to Zookeeper
-Client {
-    org.apache.zookeeper.server.auth.DigestLoginModule required
-    username="admin"
-    password="admin-secret";
-};
-
-
diff --git a/test-dataset/kafka-docker-compose/security/zookeeper-server.jaas b/test-dataset/kafka-docker-compose/security/zookeeper-server.jaas
deleted file mode 100644
index 4425a7b..0000000
--- a/test-dataset/kafka-docker-compose/security/zookeeper-server.jaas
+++ /dev/null
@@ -1,6 +0,0 @@
-Server {
-    org.apache.zookeeper.server.auth.DigestLoginModule required
-    username="admin"
-    password="admin-secret"
-    user_admin="admin-secret";
-};