Compare commits
1 commit: main...daniel-y-p

| Author | SHA1 | Date |
|---|---|---|
|  | d8f5a60e7d |  |
@@ -16,9 +16,6 @@ trim_trailing_whitespace = false

[*.yml]
indent_size = 2

[*.yaml]
indent_size = 2

[*.scala]
indent_size = 2
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.

* @superhx @Gezi-lzq @1sonofqiu @woshigaopp
* @superhx @SCNieh @Chillax-0v0 @Gezi-lzq
@@ -0,0 +1,41 @@
---
name: "\U0001F41B Bug report"
about: Something is not working
title: "[BUG] "
labels: bug
assignees: ''

---

### Version & Environment

<!--

Please give your AutoMQ version and environment info:
- AutoMQ version (e.g. `v0.1.0`):
- Operating System (e.g. from `/etc/os-release`):

-->


### What went wrong?

<!--
Describe the unexpected behavior
-->

### What should have happened instead?

<!--
Describe what you wanted to happen.
-->

### How to reproduce the issue?

1.
2.
3.

### Additional information

Please attach any relevant logs, backtraces, or metric charts.
@@ -1,105 +0,0 @@
name: "🐛 Bug Report"
description: Report a problem or unexpected behavior in AutoMQ
title: "[BUG] "
labels: [bug]
assignees: [your-github-username]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for reporting a bug. Please fill out the form below to help us diagnose and fix the issue.

  - type: input
    id: version
    attributes:
      label: AutoMQ Version
      description: Run `automq --version` to find this.
      placeholder: e.g., v0.1.0
    validations:
      required: true

  - type: input
    id: os
    attributes:
      label: Operating System
      description: Use `uname` or check `/etc/os-release`.
      placeholder: e.g., Ubuntu 20.04
    validations:
      required: true

  - type: dropdown
    id: install-method
    attributes:
      label: Installation Method
      description: How did you install AutoMQ?
      options:
        - source
        - binary
        - docker
        - package manager
        - other
    validations:
      required: true

  - type: textarea
    id: hardware
    attributes:
      label: Hardware Configuration
      description: Describe CPU, memory, disk, and other relevant hardware specs.
      placeholder: e.g., 4-core CPU, 16GB RAM, SSD
    validations:
      required: false

  - type: textarea
    id: software
    attributes:
      label: Other Relevant Software
      description: Include any additional tools or versions (e.g., Kafka version, monitoring tools).
      placeholder: e.g., Kafka v3.6.0, Prometheus v2.45.0
    validations:
      required: false

  - type: textarea
    id: problem
    attributes:
      label: What Went Wrong?
      description: Describe the unexpected behavior or error message.
      placeholder: |
        e.g. Broker crashes when receiving a large number of messages.
        Error: "Connection refused: No available brokers"
    validations:
      required: true

  - type: textarea
    id: expected
    attributes:
      label: What Should Have Happened Instead?
      description: Describe what you expected to happen.
      placeholder: e.g., The broker should have handled the load gracefully.
    validations:
      required: true

  - type: textarea
    id: reproduction
    attributes:
      label: Steps to Reproduce
      description: Provide step-by-step instructions to reproduce the issue.
      placeholder: |
        1. Start AutoMQ broker with default config.
        2. Send a large number of messages.
        3. Observe the broker behavior.
    validations:
      required: true

  - type: textarea
    id: additional
    attributes:
      label: Additional Information
      description: Add logs, stack traces, metrics, configuration, or screenshots here.
      placeholder: |
        - Logs
        - Backtraces
        - Configuration files
        - Screenshots
    validations:
      required: false
@@ -0,0 +1,16 @@
---
name: "\U0001F680 Feature request"
about: Suggest an idea for AutoMQ
title: "[Enhancement]"
labels: enhancement
assignees: ''

---

### Who is this for and what problem do they have today?


### Why is solving this problem impactful?


### Additional notes
@@ -1,58 +0,0 @@
name: 🚀 Feature Request
description: Suggest a new idea or improvement for AutoMQ
title: "[Feature Request] "
labels:
  - enhancement
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to suggest a feature! Please fill out the form below as clearly as possible.

  - type: input
    id: target-audience
    attributes:
      label: Who is this feature for?
      description: Who are the users that would benefit from this feature?
      placeholder: e.g. Engineers needing real-time AutoMQ monitoring
    validations:
      required: true

  - type: textarea
    id: current-problem
    attributes:
      label: What problem are they facing today?
      description: Describe the issue or limitation they experience without this feature.
      placeholder: e.g. No built-in dashboard to monitor metrics like throughput, latency, etc.
    validations:
      required: true

  - type: textarea
    id: impact
    attributes:
      label: Why is solving this impactful?
      description: Explain the importance of this feature and how it benefits users, the project, or the community.
      placeholder: e.g. Improves system visibility, reduces debugging time, enables proactive alerting...
    validations:
      required: true

  - type: textarea
    id: solution
    attributes:
      label: Proposed solution
      description: Describe your proposed solution or idea in detail.
      placeholder: |
        - Web dashboard with real-time charts
        - Accessible via the AutoMQ admin panel
        - Uses minimal resources
    validations:
      required: true

  - type: textarea
    id: notes
    attributes:
      label: Additional notes
      description: Add any related issues, dependencies, or initial work done.
      placeholder: e.g. Related to issue #123, might require metrics export setup
    validations:
      required: false
@@ -0,0 +1,43 @@
---
name: "⭐ Good first issue"
about: Design good first issue for new contributors
title: "[Good first issue] "
labels: good first issue
assignees: ''

---

### Background

<!--

Please give your issue background.
e.g. Now AutoMQ Kafka's logs are only stored in the local disk. We want to store them in the cloud object storage as well to offer the ability to query logs from object storage. Store logs on the object storage is cheaper and more reliable.

-->

### What's our expectation for the issue

<!--

e.g. Local file logs still exist. When log is flushed to local file system, the log data will upload to object storage as well. The log path will be like `s3://bucket-name/automq/cluster-id/broker-id/logs/xx`.

-->

### How to started
<!--

Guide the developer how to complete the issue,including:

e.g.
- Precondition:
  - You need to know the principal of how AutoMQ print logs to local file system.
- What main classes are involved when you are coding:
  - `ExampleClassA`
  - `ExampleClassB`
- Other tips:
  - You can refer to the `ExampleClassA` and `ExampleClassB` of AutoMQ Kafka to get some inspiration.
-->

### Reference
- [Kafka Official Document](https://kafka.apache.org/documentation/)
@@ -1,56 +0,0 @@
name: "⭐ Good First Issue"
description: Design and describe a task suitable for first-time contributors
title: "[Good First Issue] "
labels: [good first issue]
assignees: [your-github-username]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for helping us make AutoMQ better for new contributors! Please fill out the details of this good first issue.

  - type: textarea
    id: background
    attributes:
      label: Background
      description: Explain why this issue is being created and any relevant context.
      placeholder: |
        e.g. Currently, Kafka's logs are stored only on the local disk. We want to also store them in cloud object storage like S3 to improve reliability and enable querying.
    validations:
      required: true

  - type: textarea
    id: expectation
    attributes:
      label: What's Our Expectation for This Issue?
      description: Describe what the desired outcome of this issue is.
      placeholder: |
        e.g. Log files should still exist locally, but also be uploaded to S3 using the following path format: s3://bucket-name/automq/cluster-id/broker-id/logs/xx
    validations:
      required: true

  - type: textarea
    id: getting-started
    attributes:
      label: How to Get Started
      description: Help contributors understand how to begin working on the issue.
      placeholder: |
        - Precondition:
          - Understand how AutoMQ writes logs to the local filesystem.
        - Main Classes:
          - `LogWriter`
          - `CloudUploader`
        - Tips:
          - Refer to the `LogWriter` class to understand the current logic.
    validations:
      required: true

  - type: textarea
    id: references
    attributes:
      label: Reference Links
      description: Include any helpful links, documents, or code references.
      placeholder: |
        e.g. [Kafka Official Documentation](https://kafka.apache.org/documentation/)
    validations:
      required: false
@@ -1,25 +0,0 @@
name: Auto Assign Issue
on:
  issue_comment:
    types: [created]

jobs:
  assign-issue:
    runs-on: ubuntu-latest
    steps:
      - name: Check if comment contains '/assign'
        if: contains(github.event.comment.body, '/assign')
        uses: actions/github-script@v6
        with:
          script: |
            try {
              await github.rest.issues.addAssignees({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.payload.issue.number,
                assignees: [context.payload.comment.user.login]
              });
              console.log(`Assigned issue #${context.payload.issue.number} to @${context.payload.comment.user.login}`);
            } catch (error) {
              console.error('Error assigning issue:', error);
            }
@@ -1,67 +0,0 @@
name: Docker Bitnami Release

on:
  workflow_dispatch:
  push:
    tags:
      - '[0-9]+.[0-9]+.[0-9]+'
      - '[0-9]+.[0-9]+.[0-9]+-rc[0-9]+'


jobs:
  docker-release:
    name: Docker Image Release
    strategy:
      matrix:
        platform: [ "ubuntu-24.04" ]
        jdk: ["17"]
    runs-on: ${{ matrix.platform }}
    permissions:
      contents: write
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up JDK ${{ matrix.jdk }}
        uses: actions/setup-java@v3
        with:
          java-version: ${{ matrix.jdk }}
          distribution: "zulu"
      - name: Setup Gradle
        uses: gradle/gradle-build-action@v2.12.0
      - name: Get project version
        id: get_project_version
        run: |
          project_version=$(./gradlew properties | grep "version:" | awk '{print $2}')
          echo "PROJECT_VERSION=${project_version}" >> $GITHUB_OUTPUT

      - name: Build TarGz
        run: |
          ./gradlew -Pprefix=automq-${{ github.ref_name }}_ --build-cache --refresh-dependencies clean releaseTarGz

      # docker image release
      - name: Cp TarGz to Docker Path
        run: |
          cp ./core/build/distributions/automq-${{ github.ref_name }}_kafka-${{ steps.get_project_version.outputs.PROJECT_VERSION }}.tgz ./container/bitnami
      - name: Determine Image Tags
        id: image_tags
        run: |
          echo "tags=${{ secrets.DOCKERHUB_USERNAME }}/automq:${{ github.ref_name }}-bitnami" >> $GITHUB_OUTPUT

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_READ_WRITE_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: ./container/bitnami
          push: true
          tags: ${{ steps.image_tags.outputs.tags }}
          platforms: linux/amd64,linux/arm64
@@ -1,70 +0,0 @@
name: AutoMQ Kafka Docker Release

on:
  workflow_dispatch:
    inputs:
      tag:
        description: 'AutoMQ Version Tag'
        required: false
        type: string
  workflow_run:
    workflows: ["GitHub Release"]
    types:
      - completed

env:
  KAFKA_VERSION: "3.9.0"

jobs:
  automq-kafka-release:
    name: AutoMQ Kafka Docker Image Release
    strategy:
      matrix:
        platform: [ "ubuntu-24.04" ]
        jdk: [ "17" ]
    runs-on: ${{ matrix.platform }}
    permissions:
      contents: write
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4

      - name: Get release tag
        run: |
          if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ github.event.inputs.tag }}" ]]; then
            TAG="${{ github.event.inputs.tag }}"
          # use the latest tag if not specified
          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            TAG=$(git ls-remote --tags https://github.com/AutoMQ/automq.git | grep -v '\^{}' | tail -1 | sed 's/.*refs\/tags\///')
          else
            TAG="${{ github.event.workflow_run.head_branch }}"
          fi

          AUTOMQ_URL="https://github.com/AutoMQ/automq/releases/download/${TAG}/automq-${TAG}_kafka-${KAFKA_VERSION}.tgz"

          {
            echo "AUTOMQ_VERSION=${TAG}-kafka"
            echo "AUTOMQ_URL=${AUTOMQ_URL}"
          } >> $GITHUB_ENV

      - name: Set up Python 3.10
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_READ_WRITE_TOKEN }}

      - name: Build and Push Docker Image
        run: |
          python3 -m venv .venv
          source .venv/bin/activate
          .venv/bin/pip install setuptools

          cd docker
          python3 docker_release.py \
            ${{ secrets.DOCKERHUB_USERNAME }}/automq:${AUTOMQ_VERSION} \
            --kafka-url ${AUTOMQ_URL}
@@ -1,84 +0,0 @@
name: Docker Strimzi Release

on:
  workflow_dispatch:
    inputs:
      tag:
        description: 'AutoMQ Version Tag'
        required: false
        type: string
  workflow_run:
    workflows: ["GitHub Release"]
    types:
      - completed

env:
  KAFKA_VERSION: "3.9.0"
  STRIMZI_REPO: "https://github.com/AutoMQ/strimzi-kafka-operator.git"
  STRIMZI_BRANCH: "main"

jobs:
  strimzi-release:
    name: Strimzi Image Release
    if: ${{ github.event.workflow_run.conclusion == 'success' || github.event_name == 'workflow_dispatch' }}
    strategy:
      matrix:
        platform: [ "ubuntu-24.04" ]
        jdk: ["17"]
    runs-on: ${{ matrix.platform }}
    permissions:
      contents: write
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Get release tag
        run: |
          if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ github.event.inputs.tag }}" ]]; then
            TAG="${{ github.event.inputs.tag }}"
          # use the latest tag if not specified
          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            TAG=$(git ls-remote --tags https://github.com/AutoMQ/automq.git | grep -v '\^{}' | tail -1 | sed 's/.*refs\/tags\///')
          else
            TAG="${{ github.event.workflow_run.head_branch }}"
          fi

          AUTOMQ_URL="https://github.com/AutoMQ/automq/releases/download/${TAG}/automq-${TAG}_kafka-${KAFKA_VERSION}.tgz"

          {
            echo "AUTOMQ_VERSION=${TAG}"
            echo "AUTOMQ_URL=${AUTOMQ_URL}"
          } >> $GITHUB_ENV

      - name: Set up JDK ${{ matrix.jdk }}
        uses: actions/setup-java@v3
        with:
          java-version: ${{ matrix.jdk }}
          distribution: "zulu"

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_READ_WRITE_TOKEN }}

      - name: Build AutoMQ Strimzi Image
        run: |
          git clone --depth 1 --branch "${{ env.STRIMZI_BRANCH }}" "${{ env.STRIMZI_REPO }}" strimzi
          cd strimzi

          chmod +x ./tools/automq/build-automq-image.sh
          ./tools/automq/build-automq-image.sh \
            "${{ env.AUTOMQ_VERSION }}" \
            "${{ env.AUTOMQ_URL }}" \
            "${{ env.KAFKA_VERSION }}" \
            "${{ secrets.DOCKERHUB_USERNAME }}" \
            "automq"
@@ -57,14 +57,12 @@ jobs:
        run: ./tests/docker/run_tests.sh
        env:
          ESK_TEST_YML: ${{ inputs.test-yaml }}
          _DUCKTAPE_OPTIONS: "--deflake 4"
        shell: bash
      - name: Run E2E tests with path
        if: ${{ inputs.test-path != '' }}
        run: ./tests/docker/run_tests.sh
        env:
          TC_PATHS: ${{ inputs.test-path }}
          _DUCKTAPE_OPTIONS: "--deflake 4"
        shell: bash
      - name: Extract results
        id: extract-results
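As an aside to the hunk above (not part of this diff): the step drives `./tests/docker/run_tests.sh` purely through environment variables, so the same invocation can be sketched locally. The suite path below is one of the paths used elsewhere in these workflows; that a local Docker setup behaves like the CI runner is an assumption.

```shell
# Hypothetical local reproduction of the CI step above; assumes
# tests/docker/run_tests.sh reads the same variables as in the workflow.
TC_PATHS="tests/kafkatest/tests/streams" \
_DUCKTAPE_OPTIONS="--deflake 4" \
./tests/docker/run_tests.sh
```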
@@ -30,13 +30,10 @@ jobs:
        uses: gradle/gradle-build-action@v2.12.0

      - name: Build TarGz
        id: build-targz
        run: |
          ./gradlew -Pprefix=automq-${{ github.ref_name }}_ --build-cache --refresh-dependencies clean releaseTarGz
          mkdir -p core/build/distributions/latest
          LATEST_TAG=$(git tag --sort=-v:refname | grep -E '^[0-9]+\.[0-9]+\.[0-9]+$' | head -n 1)
          echo "LATEST_TAG=$LATEST_TAG"
          IS_LATEST="false"
          if [ "$LATEST_TAG" == "${{ github.ref_name }}" ]; then
            IS_LATEST=true
          fi

@@ -55,7 +52,7 @@ jobs:

      - uses: tvrcgo/oss-action@master
        name: upload-latest
        if: ${{ github.repository_owner == 'AutoMQ' && steps.build-targz.outputs.IS_LATEST == 'true' }}
        if: ${{ github.repository_owner == 'AutoMQ' && env.IS_LATEST == 'true' }}
        with:
          bucket: ${{ secrets.UPLOAD_BUCKET }}
          key-id: ${{ secrets.UPLOAD_BUCKET_AK }}
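As background for the change above (not part of this diff): the `upload-latest` condition now reads `env.IS_LATEST`, and a shell step conventionally has to publish such a flag explicitly before either form of the expression can see it. A minimal sketch of the two standard mechanisms, assuming it runs at the end of the `Build TarGz` step shown above:

```shell
# Illustrative only: exporting the flag computed earlier in the step.
# Appending to $GITHUB_ENV makes it readable as env.IS_LATEST;
# appending to $GITHUB_OUTPUT makes it readable as steps.build-targz.outputs.IS_LATEST.
echo "IS_LATEST=${IS_LATEST}" >> "$GITHUB_ENV"
echo "IS_LATEST=${IS_LATEST}" >> "$GITHUB_OUTPUT"
```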
@@ -0,0 +1,61 @@
name: Nightly Extra E2E tests
on:
  workflow_dispatch:
  schedule:
    - cron: '0 16 * * *'

jobs:
  benchmarks_e2e:
    name: "Run benchmarks E2E Tests"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "benchmarks"
      test-path: "tests/kafkatest/benchmarks"
      runner: "e2e"
  connect_e2e_1:
    name: "Run connect E2E Tests 1"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "connect1"
      test-yaml: "tests/suites/connect_test_suite1.yml"
      runner: "e2e"
  connect_e2e_2:
    name: "Run connect E2E Tests 2"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "connect2"
      test-yaml: "tests/suites/connect_test_suite2.yml"
      runner: "e2e"
  connect_e2e_3:
    name: "Run connect E2E Tests 3"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "connect3"
      test-yaml: "tests/suites/connect_test_suite3.yml"
      runner: "e2e"
  streams_e2e:
    name: "Run streams E2E Tests"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "streams"
      test-path: "tests/kafkatest/tests/streams"
      runner: "e2e"
  e2e_summary:
    name: "E2E Tests Summary"
    runs-on: "e2e"
    if: ${{ always() && github.repository_owner == 'AutoMQ' }}
    needs: [ benchmarks_e2e, connect_e2e_1, connect_e2e_2, connect_e2e_3, streams_e2e ]
    steps:
      - name: Report results
        run: python3 tests/report_e2e_results.py
        env:
          CURRENT_REPO: ${{ github.repository }}
          RUN_ID: ${{ github.run_id }}
          WEB_HOOK_URL: ${{ secrets.E2E_REPORT_WEB_HOOK_URL }}
          DATA_MAP: "{\"benchmarks_e2e\": ${{ toJSON(needs.benchmarks_e2e.outputs) }}, \"connect_e2e_1\": ${{ toJSON(needs.connect_e2e_1.outputs) }}, \"connect_e2e_2\": ${{ toJSON(needs.connect_e2e_2.outputs) }}, \"connect_e2e_3\": ${{ toJSON(needs.connect_e2e_3.outputs) }}, \"streams_e2e\": ${{ toJSON(needs.streams_e2e.outputs) }}}"
          REPORT_TITLE_PREFIX: "Extra"
@@ -1,8 +1,8 @@
name: Nightly E2E tests
name: Nightly Main E2E tests
on:
  workflow_dispatch:
  schedule:
    - cron: '0 16 1,7,14,21,28 * *'
    - cron: '0 16 * * *'

jobs:
  main_e2e_1:

@@ -45,51 +45,11 @@ jobs:
      suite-id: "main5"
      test-path: "tests/kafkatest/automq"
      runner: "e2e"
  benchmarks_e2e:
    name: "Run benchmarks E2E Tests"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "benchmarks"
      test-path: "tests/kafkatest/benchmarks"
      runner: "e2e"
  connect_e2e_1:
    name: "Run connect E2E Tests 1"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "connect1"
      test-yaml: "tests/suites/connect_test_suite1.yml"
      runner: "e2e"
  connect_e2e_2:
    name: "Run connect E2E Tests 2"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "connect2"
      test-yaml: "tests/suites/connect_test_suite2.yml"
      runner: "e2e"
  connect_e2e_3:
    name: "Run connect E2E Tests 3"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "connect3"
      test-yaml: "tests/suites/connect_test_suite3.yml"
      runner: "e2e"
  streams_e2e:
    name: "Run streams E2E Tests"
    uses: ./.github/workflows/e2e-run.yml
    if: ${{ github.repository_owner == 'AutoMQ' }}
    with:
      suite-id: "streams"
      test-path: "tests/kafkatest/tests/streams"
      runner: "e2e"
  e2e_summary:
    runs-on: "e2e"
    name: "E2E Tests Summary"
    if: ${{ always() && github.repository_owner == 'AutoMQ' }}
    needs: [ main_e2e_1, main_e2e_2, main_e2e_3, main_e2e_4, main_e2e_5, benchmarks_e2e, connect_e2e_1, connect_e2e_2, connect_e2e_3, streams_e2e ]
    needs: [ main_e2e_1, main_e2e_2, main_e2e_3, main_e2e_4, main_e2e_5 ]
    steps:
      - name: Report results
        run: python3 tests/report_e2e_results.py

@@ -97,5 +57,5 @@ jobs:
          CURRENT_REPO: ${{ github.repository }}
          RUN_ID: ${{ github.run_id }}
          WEB_HOOK_URL: ${{ secrets.E2E_REPORT_WEB_HOOK_URL }}
          DATA_MAP: "{\"main_e2e_1\": ${{ toJSON(needs.main_e2e_1.outputs) }}, \"main_e2e_2\": ${{ toJSON(needs.main_e2e_2.outputs) }}, \"main_e2e_3\": ${{ toJSON(needs.main_e2e_3.outputs) }}, \"main_e2e_4\": ${{ toJSON(needs.main_e2e_4.outputs) }}, \"main_e2e_5\": ${{ toJSON(needs.main_e2e_5.outputs) }}, \"benchmarks_e2e\": ${{ toJSON(needs.benchmarks_e2e.outputs) }}, \"connect_e2e_1\": ${{ toJSON(needs.connect_e2e_1.outputs) }}, \"connect_e2e_2\": ${{ toJSON(needs.connect_e2e_2.outputs) }}, \"connect_e2e_3\": ${{ toJSON(needs.connect_e2e_3.outputs) }}, \"streams_e2e\": ${{ toJSON(needs.streams_e2e.outputs) }}}"
          DATA_MAP: "{\"main_e2e_1\": ${{ toJSON(needs.main_e2e_1.outputs) }}, \"main_e2e_2\": ${{ toJSON(needs.main_e2e_2.outputs) }}, \"main_e2e_3\": ${{ toJSON(needs.main_e2e_3.outputs) }}, \"main_e2e_4\": ${{ toJSON(needs.main_e2e_4.outputs) }}, \"main_e2e_5\": ${{ toJSON(needs.main_e2e_5.outputs) }}}"
          REPORT_TITLE_PREFIX: "Main"
@@ -1,59 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: Publish Maven Package

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to publish'
        required: true
  push:
    tags:
      - '[0-9]+.[0-9]+.[0-9]+'
      - '[0-9]+.[0-9]+.[0-9]+-rc[0-9]+'

env:
  VERSION: ${{ github.event.inputs.version || github.ref_name }}

jobs:
  publish:
    name: "Publish to Github Packages"
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ ubuntu-22.04 ]
        jdk: [ 17 ]
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Gradle wrapper validation
        uses: gradle/actions/wrapper-validation@v3
      - name: Set up JDK ${{ matrix.jdk }}
        uses: actions/setup-java@v3
        with:
          java-version: ${{ matrix.jdk }}
          distribution: "zulu"
      - name: Setup Gradle
        uses: gradle/actions/setup-gradle@v4
        with:
          gradle-version: '8.10'
      - name: Publish
        run: |
          gradle publish -PmavenUrl='https://maven.pkg.github.com/AutoMQ/automq' \
            -PmavenUsername=${{ env.GITHUB_ACTOR }} -PmavenPassword=${{ secrets.GITHUB_TOKEN }} \
            -PskipSigning=true \
            -Pgroup=com.automq.automq -Pversion=${{ env.VERSION }}
@@ -1,31 +0,0 @@
name: Spark Iceberg image

on:
  workflow_dispatch:

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_READ_WRITE_TOKEN }}

      - name: Build and Push
        uses: docker/build-push-action@v6
        with:
          context: docker/table_topic/spark_iceberg/
          platforms: linux/amd64,linux/arm64
          push: true
          tags: automqinc/spark-iceberg:latest
@@ -2,32 +2,18 @@

Thank you for your interest in contributing! We love community contributions.
Read on to learn how to contribute to AutoMQ.
We appreciate first-time contributors, and we are happy to assist you in getting started. In case of questions, just
We appreciate first time contributors and we are happy to assist you in getting started. In case of questions, just
reach out to us via [Wechat Group](https://www.automq.com/img/----------------------------1.png)
or [Slack](https://join.slack.com/t/automq/shared_invite/zt-29h17vye9-thf31ebIVL9oXuRdACnOIA)!

Before getting started, please review AutoMQ's Code of Conduct. Everyone interacting in Slack or WeChat
Before getting started, please review AutoMQ's Code of Conduct. Everyone interacting in Slack or Wechat
follow [Code of Conduct](CODE_OF_CONDUCT.md).

## Suggested Onboarding Path for New Contributors

If you are new to AutoMQ, it is recommended to first deploy and run AutoMQ using Docker as described in the README.
This helps you quickly understand AutoMQ’s core concepts and behavior without local environment complexity.

After gaining familiarity, contributors who want to work on code can follow the steps in this guide to build and run AutoMQ locally.


## Code Contributions

### Finding or Reporting Issues

- **Find an existing issue:** Look through the [existing issues](https://github.com/AutoMQ/automq/issues). Issues open for contributions are often tagged with `good first issue`. To claim an issue, simply reply with '/assign', and the GitHub bot will assign it to you. Start with
this [tagged good first issue](https://github.com/AutoMQ/automq-for-kafka/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22).
- **Report a new issue:** If you've found a bug or have a feature request, please [create a new issue](https://github.com/AutoMQ/automq/issues/new/choose). Select the appropriate template (Bug Report or Feature Request) and fill out the form provided.

If you have any questions about an issue, please feel free to ask in the issue comments. We will do our best to clarify any doubts you may have.

### Submitting Pull Requests
Most of the issues open for contributions are tagged with 'good first issue.' To claim one, simply reply with 'pick up' in the issue and the AutoMQ maintainers will assign the issue to you. If you have any questions about the 'good first issue' please feel free to ask. We will do our best to clarify any doubts you may have.
Start with
this [tagged good first issue](https://github.com/AutoMQ/automq-for-kafka/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)

The usual workflow of code contribution is:
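As a side note to the section above (not part of this diff): the '/assign' comment can also be posted from the terminal. A sketch using the GitHub CLI, assuming `gh` is installed and authenticated; the issue number is a placeholder:

```shell
# Hypothetical example: claim an open issue by commenting '/assign' on it.
gh issue comment 1234 --repo AutoMQ/automq --body "/assign"
```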
@@ -39,24 +25,24 @@ The usual workflow of code contribution is:
5. Push your local branch to your fork.
6. Submit a Pull Request so that we can review your changes.
7. [Link an existing Issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue)
(created via the steps above or an existing one you claimed) that does not include the `needs triage` label to your Pull Request. A pull request without a linked issue will be
that does not include the `needs triage` label to your Pull Request. A pull request without a linked issue will be
closed, otherwise.
8. Write a PR title and description that follows the [Pull Request Template](PULL_REQUEST_TEMPLATE.md).
9. An AutoMQ maintainer will trigger the CI tests for you and review the code.
10. Review and respond to feedback and questions from AutoMQ maintainers.
10. Review and respond to feedback and questions by AutoMQ maintainers.
11. Merge the contribution.

Pull Request reviews are done on a regular basis.

> [!NOTE]
> [!NOTE]
> Please make sure you respond to our feedback/questions and sign our CLA.
>
> Pull Requests without updates will be closed due to inactivity.
> Pull Requests without updates will be closed due inactivity.

## Requirement

| Requirement | Version |
| ---------------------- | ---------- |
|------------------------|------------|
| Compiling requirements | JDK 17 |
| Compiling requirements | Scala 2.13 |
| Running requirements | JDK 17 |
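As a quick aside (not part of this diff): the requirement table above can be sanity-checked before building. A sketch, assuming the Gradle wrapper described in the next section; Scala 2.13 is resolved by the build itself rather than installed separately:

```shell
# Expect a 17.x JDK for both compiling and running.
java -version

# The wrapper prints the Gradle version it will use plus the detected JVM.
./gradlew --version
```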
@@ -72,21 +58,17 @@ Building AutoMQ is the same as Apache Kafka. Kafka uses Gradle as its project management tool.
It is not recommended to manually install Gradle. The gradlew script in the root directory will automatically download Gradle for you, and the version is also specified by the gradlew script.

### Build

```
./gradlew jar -x test
```

### Prepare S3 service

Refer to this [documentation](https://docs.localstack.cloud/getting-started/installation/) to install `localstack` to mock a local S3 service or use AWS S3 service directly.
Refer to this [documentation](https://docs.localstack.cloud/getting-started/installation/) to install `localstack` to mock a local s3 service or use AWS S3 service directly.

If you are using localstack then create a bucket with the following command:

```
aws s3api create-bucket --bucket ko3 --endpoint=http://127.0.0.1:4566
```

### Modify Configuration

Modify the `config/kraft/server.properties` file. The following settings need to be changed:

@@ -101,35 +83,29 @@ s3.region=us-east-1
# The bucket of S3 service to store data
s3.bucket=ko3
```

> Tips: If you're using localstack, make sure to set the s3.endpoint to http://127.0.0.1:4566, not localhost. Set the region to us-east-1. The bucket should match the one created earlier.

### Format

Generated Cluster UUID:

```
KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"
```

Format Metadata Catalog:

```
bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/server.properties
```

### IDE Start Configuration

| Item | Value |
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| Main | core/src/main/scala/kafka/Kafka.scala |
| ClassPath | -cp kafka.core.main |
| VM Options | -Xmx1G -Xms1G -server -XX:+UseZGC -XX:MaxDirectMemorySize=2G -Dkafka.logs.dir=logs/ -Dlog4j.configuration=file:config/log4j.properties -Dio.netty.leakDetection.level=paranoid |
| CLI Arguments | config/kraft/server.properties |
| Environment | KAFKA_S3_ACCESS_KEY=test;KAFKA_S3_SECRET_KEY=test |
| Item | Value |
|------------------------|------------|
| Main | core/src/main/scala/kafka/Kafka.scala |
| ClassPath | -cp kafka.core.main |
| VM Options | -Xmx1G -Xms1G -server -XX:+UseZGC -XX:MaxDirectMemorySize=2G -Dkafka.logs.dir=logs/ -Dlog4j.configuration=file:config/log4j.properties -Dio.netty.leakDetection.level=paranoid |
| CLI Arguments | config/kraft/server.properties |
| Environment | KAFKA_S3_ACCESS_KEY=test;KAFKA_S3_SECRET_KEY=test |

> tips: If you are using localstack, just use any value of access key and secret key. If you are using real S3 service, set `KAFKA_S3_ACCESS_KEY` and `KAFKA_S3_SECRET_KEY` to the real access key and secret key that have read/write permission of S3 service.


## Documentation

We welcome Pull Requests that enhance the grammar, structure, or fix typos in our documentation.


README.md
@ -1,184 +1,240 @@
|
|||
# A Diskless Kafka® on S3, Offering 10x Cost Savings and Scaling in Seconds.
|
||||
# AutoMQ: A stateless Kafka on S3, offering 10x cost savings and scaling in seconds.
|
||||
|
||||
<div align="center">
|
||||
<p align="center">
|
||||
🔥  <a
|
||||
href="https://www.automq.com/quick-start#Cloud?utm_source=github_automq_cloud"
|
||||
target="_blank"
|
||||
><b>Free trial of AutoMQ Business Edition</b></a>   
|
||||
📑  <a
|
||||
href="https://www.automq.com/docs/automq/what-is-automq/overview?utm_source=github_automq"
|
||||
href="https://docs.automq.com/docs/automq-opensource/HSiEwHVfdiO7rWk34vKcVvcvn2Z?utm_source=github"
|
||||
target="_blank"
|
||||
><b>Documentation</b></a>   
|
||||
🔥  <a
|
||||
href="https://www.automq.com/docs/automq-cloud/getting-started/install-byoc-environment/aws/install-env-from-marketplace?utm_source=github_automq"
|
||||
📃  <a
|
||||
href="https://www.automq.com/blog/introducing-automq-cloud-native-replacement-of-apache-kafka?utm_source=github"
|
||||
target="_blank"
|
||||
><b>Free trial of AutoMQ on AWS</b></a>   
|
||||
><b>AutoMQ Introduction</b></a>
|
||||
</p>
|
||||
|
||||
|
||||
[](https://www.linkedin.com/company/automq)
|
||||
[](https://go.automq.com/slack)
|
||||
[-yellow)](https://www.automq.com/blog/automq-vs-apache-kafka-a-real-aws-cloud-bill-comparison?utm_source=github_automq)
|
||||
[-orange)](https://www.automq.com/docs/automq/benchmarks/automq-vs-apache-kafka-benchmarks-and-cost?utm_source=github_automq)
|
||||
[](https://twitter.com/intent/follow?screen_name=AutoMQ_Lab)
|
||||
[](https://join.slack.com/t/automq/shared_invite/zt-29h17vye9-thf31ebIVL9oXuRdACnOIA)
|
||||
[-yellow)](https://www.automq.com/blog/automq-vs-apache-kafka-a-real-aws-cloud-bill-comparison)
|
||||
[-orange)](https://docs.automq.com/docs/automq-opensource/IJLQwnVROiS5cUkXfF0cuHnWnNd)
|
||||
[](https://gurubase.io/g/automq)
|
||||
[](https://deepwiki.com/AutoMQ/automq)
|
||||
|
||||
<a href="https://trendshift.io/repositories/9782" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9782" alt="AutoMQ%2Fautomq | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
---
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
|
||||
<img width="97%" alt="automq-solgan" src="https://github.com/user-attachments/assets/bdf6c5f5-7fe1-4004-8e15-54f1aa6bc32f" />
|
||||
|
||||
<a href="https://www.youtube.com/watch?v=IB8sh639Rsg" target="_blank">
|
||||
<img alt="Grab" src="https://github.com/user-attachments/assets/01668da4-3916-4f49-97af-18f91b25f8c1" width="19%" />
|
||||
</a>
|
||||
|
||||
<a href="https://www.automq.com/customer" target="_blank">
|
||||
<img alt="Avia" src="https://github.com/user-attachments/assets/d2845e1c-caf4-444a-93f0-97b13c9c8490" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/customer" target="_blank">
|
||||
<img alt="Tencent" src="https://github.com/user-attachments/assets/2bdd205f-38c1-4110-9af1-d4c782db3395" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/customer" target="_blank">
|
||||
<img alt="Honda" src="https://github.com/user-attachments/assets/ee65af29-8ee3-404b-bf81-a004fe0c327c" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/customer" target="_blank">
|
||||
<img alt="Trip" src="https://github.com/user-attachments/assets/0cb4ae63-6dc1-43dc-9416-625a08dca2e5" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/customer" target="_blank">
|
||||
<img alt="LG" src="https://github.com/user-attachments/assets/ed9e0f87-abc6-4552-977c-f342ecb105a0" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/blog/jdcom-automq-cubefs-trillion-scale-kafka-messaging" target="_blank">
|
||||
<img alt="JD" src="https://github.com/user-attachments/assets/a7a86d2c-66fa-4575-b181-6cf56a31f880" width="19%" />
|
||||
</a>
|
||||
|
||||
<a href="https://www.automq.com/blog/automq-help-geely-auto-solve-the-pain-points-of-kafka-elasticity-in-the-v2x-scenario" target="_blank">
|
||||
<img alt="Geely" src="https://github.com/user-attachments/assets/d61f7c51-0d80-4290-a428-a941441c7ec9" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/blog/dewu-builds-trillion-level-monitoring-system-based-on-automq" target="_blank">
|
||||
<img alt="Poizon" src="https://github.com/user-attachments/assets/45f4c642-0495-4bcc-9224-d2c5c2b2f0d5" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/customer" target="_blank">
|
||||
<img alt="Bitkub" src="https://github.com/user-attachments/assets/3b95cd26-973d-4405-9d2c-289c5807bb39" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/customer" target="_blank">
|
||||
<img alt="PalmPay" src="https://github.com/user-attachments/assets/b22f70f5-7553-4283-ac20-f034868b0121" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/blog/automq-vs-kafka-evaluation-and-comparison-by-little-red-book" target="_blank">
|
||||
<img alt="RedNote" src="https://github.com/user-attachments/assets/4a62f1f3-e171-4d58-9d7e-ebabad6f8e23" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/blog/xpeng-motors-reduces-costs-by-50-by-replacing-kafka-with-automq" target="_blank">
|
||||
<img alt="XPENG" src="https://github.com/user-attachments/assets/8b32c484-a4bf-4793-80d0-f454da254337" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/customer" target="_blank">
|
||||
<img alt="OPPO" src="https://github.com/user-attachments/assets/2b6d3cf0-ae54-4073-bc06-c6623e31c6d0" width="19%" />
|
||||
</a>
|
||||
<a href="https://www.automq.com/customer" target="_blank">
|
||||
<img alt="BambuLab" src="https://github.com/user-attachments/assets/d09ded1b-3696-49ac-b38f-d02f9598b3bb" width="19%" />
|
||||
</a>
|
||||
</div>
|
||||
|
||||
## 👥 Big Companies Worldwide are Using AutoMQ
|
||||
> Here are some of our customers who are using AutoMQ from all over the world.
|
||||
<img width="1151" alt="image" src="https://github.com/user-attachments/assets/a2668e5e-eebf-479a-b85a-9611de1b60c8" />
|
||||
|
||||
- [Grab: Driving Efficiency with AutoMQ in DataStreaming Platform](https://www.youtube.com/watch?v=IB8sh639Rsg)
|
||||
- [JD.com x AutoMQ x CubeFS: A Cost-Effective Journey](https://www.automq.com/blog/jdcom-automq-cubefs-trillion-scale-kafka-messaging?utm_source=github_automq)
|
||||
- [Palmpay Uses AutoMQ to Replace Kafka, Optimizing Costs by 50%+](https://www.automq.com/blog/palmpay-uses-automq-to-replace-kafka?utm_source=github_automq)
|
||||
- [AutoMQ help Geely Auto(Fortune Global 500) solve the pain points of Kafka elasticity in the V2X scenario](https://www.automq.com/blog/automq-help-geely-auto-solve-the-pain-points-of-kafka-elasticity-in-the-v2x-scenario?utm_source=github_automq)
|
||||
- [How Asia’s Quora Zhihu uses AutoMQ to reduce Kafka cost and maintenance complexity](https://www.automq.com/blog/how-asias-quora-zhihu-use-automq-to-reduce-kafka-cost-and-maintenance-complexity?utm_source=github_automq)
|
||||
- [XPENG Motors Reduces Costs by 50%+ by Replacing Kafka with AutoMQ](https://www.automq.com/blog/xpeng-motors-reduces-costs-by-50-by-replacing-kafka-with-automq?utm_source=github_automq)
|
||||
- [Asia's GOAT, Poizon uses AutoMQ Kafka to build observability platform for massive data(30 GB/s)](https://www.automq.com/blog/asiax27s-goat-poizon-uses-automq-kafka-to-build-a-new-generation-observability-platform-for-massive-data?utm_source=github_automq)
|
||||
- [AutoMQ Helps CaoCao Mobility Address Kafka Scalability During Holidays](https://www.automq.com/blog/automq-helps-caocao-mobility-address-kafka-scalability-issues-during-mid-autumn-and-national-day?utm_source=github_automq)
|
||||
|
||||
|
||||
### Prerequisites
|
||||
Before running AutoMQ locally, please ensure:
|
||||
- Docker version 20.x or later
|
||||
- Docker Compose v2
|
||||
- At least 4 GB RAM allocated to Docker
|
||||
- Ports 9092 and 9000 are available on your system
|
||||
|
||||
|
||||
> [!Tip]
|
||||
> Deploying a production-ready AutoMQ cluster is challenging. This Quick Start is only for evaluating AutoMQ features and is not suitable for production use. For production deployment best practices, please [contact](https://www.automq.com/contact) our community for support.
|
||||
|
||||
The `docker/docker-compose.yaml` file provides a simple single-node setup for quick evaluation and development:
|
||||
```shell
|
||||
curl -O https://raw.githubusercontent.com/AutoMQ/automq/refs/tags/1.5.5/docker/docker-compose.yaml && docker compose -f docker-compose.yaml up -d
|
||||
```
|
||||
This setup features a single AutoMQ node serving as both controller and broker, alongside MinIO for S3 storage. All services operate within a Docker bridge network called `automq_net`, allowing you to start a Kafka producer in this network to test AutoMQ:
|
||||
```shell
|
||||
docker run --network automq_net automqinc/automq:latest /bin/bash -c \
|
||||
"/opt/automq/kafka/bin/kafka-producer-perf-test.sh --topic test-topic --num-records=1024000 --throughput 5120 --record-size 1024 \
|
||||
--producer-props bootstrap.servers=server1:9092 linger.ms=100 batch.size=524288 buffer.memory=134217728 max.request.size=67108864"
|
||||
```
|
||||
After testing, you can destroy the setup with:
|
||||
```shell
|
||||
docker compose -f docker-compose.yaml down
|
||||
```
|
||||
The `docker/docker-compose-cluster.yaml` file offers a more complex setup with three AutoMQ nodes, ideal for testing AutoMQ's cluster features, and can be run in the same way.
|
||||
|
||||
There are more deployment options available:
|
||||
- [Deploy Multi-Nodes Test Cluster on Docker](https://www.automq.com/docs/automq/getting-started/deploy-multi-nodes-test-cluster-on-docker?utm_source=github_automq)
|
||||
- [Deploy on Linux with 5 Nodes](https://www.automq.com/docs/automq/deployment/deploy-multi-nodes-cluster-on-linux?utm_source=github_automq)
|
||||
- [Deploy on Kubernetes](https://www.automq.com/docs/automq/deployment/deploy-multi-nodes-cluster-on-kubernetes?utm_source=github_automq)
|
||||
- [Try AutoMQ on AWS Marketplace (Two Weeks Free Trial)](https://docs.automq.com/automq-cloud/getting-started/install-byoc-environment/aws/install-env-from-marketplace?utm_source=github_automq)
|
||||
- [Try AutoMQ on Alibaba Cloud Marketplace (Two Weeks Free Trial)](https://market.aliyun.com/products/55530001/cmgj00065841.html)
|
||||
|
||||
## 🗞️ Newest Feature - Table Topic
|
||||
Table Topic is a new feature in AutoMQ that combines stream and table functionalities to unify streaming and data analysis. Currently, it supports Apache Iceberg and integrates with catalog services such as AWS Glue, HMS, and the Rest catalog. Additionally, it natively supports S3 tables, a new AWS product announced at the 2024 re:Invent. [Learn more](https://www.automq.com/blog/automq-table-topic-seamless-integration-with-s3-tables-and-iceberg?utm_source=github_automq).
|
||||
- [Palmpay Uses AutoMQ to Replace Kafka, Optimizing Costs by 50%+](https://www.automq.com/blog/palmpay-uses-automq-to-replace-kafka)
|
||||
- [AutoMQ help Geely Auto(Fortune Global 500) solve the pain points of Kafka elasticity in the V2X scenario](https://www.automq.com/blog/automq-help-geely-auto-solve-the-pain-points-of-kafka-elasticity-in-the-v2x-scenario)
|
||||
- [How Asia’s Quora Zhihu uses AutoMQ to reduce Kafka cost and maintenance complexity](https://www.automq.com/blog/how-asias-quora-zhihu-use-automq-to-reduce-kafka-cost-and-maintenance-complexity)
|
||||
- [XPENG Motors Reduces Costs by 50%+ by Replacing Kafka with AutoMQ](https://www.automq.com/blog/xpeng-motors-reduces-costs-by-50-by-replacing-kafka-with-automq)
|
||||
- [Asia's GOAT, Poizon uses AutoMQ Kafka to build observability platform for massive data(30 GB/s)](https://www.automq.com/blog/asiax27s-goat-poizon-uses-automq-kafka-to-build-a-new-generation-observability-platform-for-massive-data)
|
||||
- [AutoMQ Helps CaoCao Mobility Address Kafka Scalability During Holidays](https://www.automq.com/blog/automq-helps-caocao-mobility-address-kafka-scalability-issues-during-mid-autumn-and-national-day)
|
||||
|
||||
## 🗞️ Newest Feature
|
||||
Table Topic feature for unified stream and data analysis, which now supports the S3 table feature announced at the 2024 re:Invent. [Learn more](https://www.automq.com/blog/automq-table-topic-seamless-integration-with-s3-tables-and-iceberg).
|
||||

|
||||
|
||||
## 🔶 Why AutoMQ
|
||||
AutoMQ is a stateless Kafka alternative that runs on S3 or any S3-compatible storage, such as MinIO. It is designed to address two major issues of Apache Kafka. First, Kafka clusters are difficult to scale out or in due to the stateful nature of its brokers. Data movement is required, and even reassigning partitions between brokers is a complex process. Second, hosting Kafka in the cloud can be prohibitively expensive. You face high costs for EBS storage, cross-AZ traffic, and significant over-provisioning due to Kafka's limited scalability.
|
||||
|
||||
Here are some key highlights of AutoMQ that make it an ideal choice to replace your Apache Kafka cluster, whether in the cloud or on-premise, as long as you have S3-compatible storage:
|
||||
- **Cost effective**: The first true cloud-native streaming storage system, designed for optimal cost and efficiency on the cloud. Refer to [this report](https://www.automq.com/docs/automq/benchmarks/cost-effective-automq-vs-apache-kafka?utm_source=github_automq) to see how we cut Apache Kafka billing by 90% on the cloud.
|
||||
- **High Reliability**: Leverage object storage service to achieve zero RPO, RTO in seconds and 99.999999999% durability.
|
||||
- **Zero Cross-AZ Traffic**: By using cloud object storage as the priority storage solution, AutoMQ eliminates cross-AZ traffic costs on AWS and GCP. In traditional Kafka setups, over 80% of costs arise from cross-AZ traffic, including producer, consumer, and replication sides.
|
||||
## 🍵 AutoMQ vs Other Streaming Platforms
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th>Feature</th>
|
||||
<th>AutoMQ</th>
|
||||
<th>Apache Kafka</th>
|
||||
<th>Confluent</th>
|
||||
<th>Apache Pulsar</th>
|
||||
<th>Redpanda</th>
|
||||
<th>Warpstream</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Apache Kafka Compatibility[1]</td>
|
||||
<td colspan="3">Native Kafka</td>
|
||||
<td>Non-Kafka</td>
|
||||
<td colspan="2">Kafka Protocol</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Source Code Availability</td>
|
||||
<td>Yes</td>
|
||||
<td>Yes</td>
|
||||
<td>No</td>
|
||||
<td>Yes</td>
|
||||
<td>Yes</td>
|
||||
<td>No</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Stateless Broker</td>
|
||||
<td>Yes</td>
|
||||
<td>No</td>
|
||||
<td>No</td>
|
||||
<td>Yes</td>
|
||||
<td>No</td>
|
||||
<td>Yes</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Publisher Latency(P99)</td>
|
||||
<td colspan="5">Single-digit ms latency</td>
|
||||
<td><a href="https://www.warpstream.com/blog/warpstream-benchmarks-and-tco">> 620ms</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Continuous Self-Balancing</td>
|
||||
<td>Yes</td>
|
||||
<td>No</td>
|
||||
<td>Yes</td>
|
||||
<td>Yes</td>
|
||||
<td>Yes</td>
|
||||
<td>Yes</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Scale in/out</td>
|
||||
<td>In seconds</td>
|
||||
<td>In hours/days</td>
|
||||
<td>In hours</td>
|
||||
<td>In hours<br>(scale-in);<br> In seconds<br>(scale-out)</td>
|
||||
<td>In hours<br>In seconds (Enterprise Only)</td>
|
||||
<td>In seconds</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Spot Instance Support</td>
|
||||
<td>Yes</td>
|
||||
<td>No</td>
|
||||
<td>No</td>
|
||||
<td>No</td>
|
||||
<td>No</td>
|
||||
<td>Yes</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Partition Reassignment</td>
|
||||
<td>In seconds</td>
|
||||
<td>In hours/days</td>
|
||||
<td>In hours</td>
|
||||
<td>In seconds</td>
|
||||
<td>In hours<br>In seconds (Enterprise Only)</td>
|
||||
<td>In seconds</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Component</td>
|
||||
<td>Broker</td>
|
||||
<td colspan="2">Broker<br>Zookeeper<br>(Non-KRaft)</td>
|
||||
<td>Broker<br>Zookeeper<br>Bookkeeper<br>Proxy(Optional)</td>
|
||||
<td>Broker</td>
|
||||
<td>Agent<br>MetadataServer</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Durability</td>
|
||||
<td>Guaranteed by cloud storage services[2]</td>
|
||||
<td colspan="2">Guaranteed by ISR </td>
|
||||
<td>Guaranteed by Bookkeeper</td>
|
||||
<td>Guaranteed by Raft</td>
|
||||
<td>Guaranteed by S3</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Inter-AZ Networking Fees</td>
|
||||
<td>No</td>
|
||||
<td colspan="4">Yes</td>
|
||||
<td>No</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
> [1] Apache Kafka Compatibility's definition is coming from this [blog](https://www.kai-waehner.de/blog/2021/05/09/kafka-api-de-facto-standard-event-streaming-like-amazon-s3-object-storage/).
|
||||
|
||||
> [2] AutoMQ's flexible architecture can utilize the durability of various cloud storage services like S3, Regional EBS, and EFS, all offering multi-AZ durability.
|
||||
|
||||
## 🔶 Why AutoMQ
|
||||
|
||||
- **Cost effective**: The first true cloud-native streaming storage system, designed for optimal cost and efficiency on the cloud. Refer to [this report](https://docs.automq.com/docs/automq-opensource/EV6mwoC95ihwRckMsUKcppnqnJb) to see how we cut Apache Kafka billing by 90% on the cloud.
|
||||
- **High Reliability**: Leverage cloud-shared storage services to achieve zero RPO, RTO in seconds and 99.999999999% durability.
|
||||
- **Serverless**:
|
||||
- Auto Scaling: Monitor cluster metrics and automatically scale in/out to align with your workload, enabling a pay-as-you-go model.
|
||||
- Scaling in seconds: The computing layer (broker) is stateless and can scale in/out within seconds, making AutoMQ a truly serverless solution.
|
||||
- Infinite scalable: Utilize cloud object storage as the primary storage solution, eliminating concerns about storage capacity.
|
||||
- Auto Scaling: Monitor cluster metrics and automatically scale in/out to align with your workload, enabling a pay-as-you-go model.
|
||||
- Scaling in seconds: The computing layer (broker) is stateless and can scale in/out within seconds, making AutoMQ a truly serverless solution.
|
||||
- Infinite scalable: Utilize cloud object storage as the primary storage solution, eliminating concerns about storage capacity.
|
||||
- **Manage-less**: The built-in auto-balancer component automatically schedules partitions and network traffic between brokers, eliminating manual partition reassignment.
|
||||
- **High performance**:
|
||||
- High throughput: Leverage pre-fetching, batch processing, and parallel technologies to maximize the capabilities of cloud object storage. Refer to the [AutoMQ Performance White Paper](https://www.automq.com/docs/automq/benchmarks/automq-vs-apache-kafka-benchmarks-and-cost?utm_source=github_automq) to see how we achieve this.
|
||||
- Low Latency: AutoMQ defaults to running on S3 directly, resulting in hundreds of milliseconds of latency. The enterprise version offers single-digit millisecond latency. [Contact us](https://www.automq.com/contact?utm_source=github_automq) for more details.
|
||||
- **Built-in Metrics Export**: Natively export Prometheus and OpenTelemetry metrics, supporting both push and pull. Ditch inefficient JMX and monitor your cluster with modern tools. Refer to [full metrics list](https://www.automq.com/docs/automq/observability/metrics?utm_source=github_automq) provided by AutoMQ.
|
||||
- **100% Kafka Compatible**: Fully compatible with Apache Kafka, offering all features with greater cost-effectiveness and operational efficiency.
|
||||
- **A superior alternative to Apache Kafka**: 100% compatible with Apache Kafka, retaining every key feature while being cheaper and better to operate.
|
||||
|
||||
## ✨Architecture
|
||||
AutoMQ is a fork of the open-source [Apache Kafka](https://github.com/apache/kafka). We've introduced a new storage engine based on object storage, transforming the classic shared-nothing architecture into a shared storage architecture.
|
||||
|
||||

|
||||
|
||||
AutoMQ's architecture is fundamentally different from Kafka's. The core difference lies in the storage layer: AutoMQ leverages object storage to achieve a stateless broker architecture. AutoMQ consists of the following key components:
|
||||
- S3 Storage Adapter: an adapter layer that reimplements the UnifiedLog, LocalLog, and LogSegment classes to create logs on S3 instead of a local disk. Traditional local disk storage is still supported if desired.
|
||||
- S3Stream: a shared streaming storage library that encapsulates various storage modules, including WAL and object storage. WAL is a write-ahead log optimized for frequent writes and low IOPS to reduce S3 API costs. To boost read performance, we use LogCache and BlockCache for improved efficiency.
|
||||
- Auto Balancer: a component that automatically balances traffic and partitions between brokers, eliminating the need for manual reassignment. Unlike Kafka, this built-in feature removes the need for cruise control.
|
||||
- Rack-aware Router: Kafka has long faced cross-AZ traffic fees on AWS and GCP. Our shared storage architecture addresses this by using a rack-aware router to provide clients in different AZs with specific partition metadata, avoiding cross-AZ fees while exchanging data through object storage.
|
||||
AutoMQ's Shared Storage architecture revolutionizes the storage layer of Apache Kafka by offloading data to cloud storage, thereby rendering the Broker stateless. This architecture incorporates both WAL (Write-Ahead Logging) storage and object storage, storing all data in object storage in near real-time.
|
||||
|
||||
For more on AutoMQ's architecture, visit [AutoMQ Architecture](https://www.automq.com/docs/automq/architecture/overview?utm_source=github_automq) or explore the source code directly.
|
||||
In this setup:
|
||||
|
||||
## 🌟 Stay Ahead
|
||||
Star AutoMQ on GitHub for instant updates on new releases.
|
||||

|
||||
- Object storage is the primary data repository, providing a flexible, cost-effective, and scalable storage solution.
|
||||
- AutoMQ introduces a WAL storage layer to counter the high latency and low IOPS associated with Object storage, thereby improving data write efficiency and lowering IOPS usage.
|
||||
- The WAL storage layer is adaptable, allowing for the selection of various storage services across different cloud providers to cater to diverse durability and performance needs. Azure Zone-redundant Disk, GCP Regional Persistent Disk, and Alibaba Cloud Regional ESSD are ideal for ensuring multi-AZ durability. For cost-effective solutions on AWS with relaxed latency scenarios, S3 can serve as WAL. Additionally, AWS EFS/FSx can balance latency and cost for critical workloads when used as WAL.
|
||||
|
||||
AutoMQ has developed a shared streaming storage library, S3Stream, which encapsulates these storage modules. By replacing the native Apache Kafka® Log storage with S3Stream, the entire Broker node becomes entirely stateless. This transformation significantly streamlines operations such as second-level partition reassignment, automatic scaling, and traffic self-balancing. To facilitate this, AutoMQ has integrated Controller components like Auto Scaling and Auto Balancing within its kernel, which oversee cluster scaling operations and traffic rebalancing, respectively. Please refer to [here](https://docs.automq.com/automq/architecture/overview) for more architecture details.
|
||||
|
||||
## ⛄ Get started with AutoMQ
|
||||
|
||||
### Deploy Locally on a Single Host
|
||||
```shell
|
||||
curl https://download.automq.com/community_edition/standalone_deployment/install_run.sh | bash
|
||||
```
|
||||
|
||||
The easiest way to run AutoMQ. You can experience features like **Partition Reassignment in Seconds** and **Continuous Self-Balancing** on your local machine. [Learn more](https://docs.automq.com/docs/automq-opensource/EsUBwQei4ilCDjkWb8WcbOZInwc)
|
||||
|
||||
There are more deployment options available:
|
||||
- [Deploy on Linux with 5 Nodes](https://docs.automq.com/docs/automq-opensource/IyXrw3lHriVPdQkQLDvcPGQdnNh)
|
||||
- [Deploy on Kubernetes (Enterprise Edition Only)](https://docs.automq.com/docs/automq-opensource/KJtLwvdaPi7oznkX3lkcCR7fnte)
|
||||
- [Runs on Ceph / MinIO / CubeFS / HDFS](https://docs.automq.com/docs/automq-opensource/RexrwfhKuiGChfk237QcEBIwnND)
|
||||
- [Try AutoMQ on Alibaba Cloud Marketplace (Two Weeks Free Trial)](https://market.aliyun.com/products/55530001/cmgj00065841.html)
|
||||
- [Try AutoMQ on AWS Marketplace (Two Weeks Free Trial)](https://docs.automq.com/automq-cloud/getting-started/install-byoc-environment/aws/install-env-from-marketplace)
|
||||
|
||||
## 💬 Community
|
||||
You can join the following groups or channels to discuss or ask questions about AutoMQ:
|
||||
- Ask questions or report a bug via [GitHub Issues](https://github.com/AutoMQ/automq/issues)
|
||||
- Discuss AutoMQ or Kafka on [Slack](https://go.automq.com/slack) or in the [WeChat group](docs/images/automq-wechat.png)
|
||||
|
||||
|
||||
## 👥 How to contribute
|
||||
If you've found a problem with AutoMQ, please open an issue on [GitHub Issues](https://github.com/AutoMQ/automq/issues).
|
||||
To contribute to AutoMQ, please see the [Code of Conduct](CODE_OF_CONDUCT.md) and [Contributing Guide](CONTRIBUTING_GUIDE.md).
|
||||
We have a list of [good first issues](https://github.com/AutoMQ/automq/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) that help you get started, gain experience, and get familiar with our contribution process. To claim one, simply reply 'pick up' in the issue, and the AutoMQ maintainers will assign it to you. If you have any questions about a 'good first issue', feel free to ask; we will do our best to clarify any doubts you may have.
|
||||
|
||||
## 👍 AutoMQ Enterprise Edition
|
||||
The enterprise edition of AutoMQ offers a robust, user-friendly control plane for seamless cluster management, with enhanced availability and observability over the open-source version. Additionally, we offer [Kafka Linking](https://www.automq.com/solutions/kafka-linking?utm_source=github_automq) for zero-downtime migration from any Kafka-compatible cluster to AutoMQ.
|
||||
|
||||
[Contact us](https://www.automq.com/contact?utm_source=github_automq) for more information about the AutoMQ enterprise edition, and we'll gladly assist with your free trial.
|
||||
> You can check the difference between the community and business editions [here](https://www.automq.com/product).
|
||||
|
||||
## 📜 License
|
||||
AutoMQ is under the Apache 2.0 license. See the [LICENSE](https://github.com/AutoMQ/automq/blob/main/LICENSE) file for details.
|
||||
|
||||
## 📝 Trademarks
|
||||
Apache®, Apache Kafka®, Kafka®, Apache Iceberg®, Iceberg®, and associated open source project names are trademarks of the Apache Software Foundation.
|
||||
<b>Watch the following video and refer to our [docs](https://docs.automq.com/automq-cloud/getting-started/install-byoc-environment/aws/install-env-via-terraform-module) to see how to deploy AutoMQ Business Edition with a two-week free license for PoC.</b>
|
||||
|
||||
<b> ⬇️ ⬇️ ⬇️ </b>
|
||||
|
||||
[](https://www.youtube.com/watch?v=O40zp81x97w)
|
||||
|
||||
|
||||
|
||||
### Free trial of AutoMQ Business Edition
|
||||
To allow users to experience the capabilities of the AutoMQ business edition without any barriers, click [here](https://www.automq.com/quick-start#Cloud?utm_source=github_automq_cloud) to apply for a no-obligation cluster trial, and note `AutoMQ Cloud Free Trial` in the message input box. We will promptly initialize an AutoMQ Cloud control panel for you in the cloud and send you its address. You can then use the control panel to create an AutoMQ cluster or perform operations such as scaling in/out.
|
||||
|
||||
No credit card is required, and there is no cost at all. We look forward to your valuable feedback to make our product better. If you want to proceed with a formal PoC, you can also reach us via [Contact Us](https://www.automq.com/contact); we will fully support your official PoC.
|
||||
|
||||
## 🐱 The relationship with Apache Kafka
|
||||
|
||||
AutoMQ is a fork of the open-source [Apache Kafka](https://github.com/apache/kafka). Starting from the Apache Kafka codebase, we identified an integration point at the LogSegment level and replaced Kafka's storage layer with our self-developed cloud-native stream storage engine, [S3Stream](https://github.com/AutoMQ/automq/tree/main/s3stream). This engine provides high-performance, low-cost, and virtually unlimited stream storage on top of cloud storage such as EBS WAL and S3. As a result, AutoMQ completely retains the code of Kafka's computing layer and is 100% compatible with Apache Kafka. We appreciate the work done by the Apache Kafka community and will continue to embrace the Kafka community.
|
||||
|
||||
## 🙋 Contact Us
|
||||
Want to learn more? [Talk with our product experts](https://www.automq.com/contact).
|
||||
|
|
|
|||
|
|
@ -1,125 +0,0 @@
|
|||
# AutoMQ Log Uploader Module
|
||||
|
||||
This module provides asynchronous S3 log upload capability based on Log4j 1.x. Other submodules only need to depend on this module and add a small amount of configuration to synchronize logs to object storage. Core components:
|
||||
|
||||
- `com.automq.log.S3RollingFileAppender`: Extends `RollingFileAppender`, pushes log events to the uploader while writing to local files.
|
||||
- `com.automq.log.uploader.LogUploader`: Asynchronously buffers, compresses, and uploads logs; supports configuration switches and periodic cleanup.
|
||||
- `com.automq.log.uploader.S3LogConfig`: Interface that abstracts the configuration required for uploading. Implementations must provide cluster ID, node ID, object storage instance, and leadership status.
|
||||
|
||||
## Quick Integration
|
||||
|
||||
1. Add dependency in your module's `build.gradle`:
|
||||
```groovy
|
||||
implementation project(':automq-log-uploader')
|
||||
```
|
||||
2. Implement or provide an `S3LogConfig` instance and configure the appender:
|
||||
|
||||
```java
|
||||
// Set up the S3LogConfig through your application
|
||||
S3LogConfig config = new MyS3LogConfig(); // your S3LogConfig implementation (see the Usage Example below)
|
||||
S3RollingFileAppender.setup(config);
|
||||
```
|
||||
3. Reference the Appender in `log4j.properties`:
|
||||
|
||||
```properties
|
||||
log4j.appender.s3_uploader=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.s3_uploader.File=logs/server.log
|
||||
log4j.appender.s3_uploader.MaxFileSize=100MB
|
||||
log4j.appender.s3_uploader.MaxBackupIndex=10
|
||||
log4j.appender.s3_uploader.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.s3_uploader.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
```
|
||||
|
||||
## S3LogConfig Interface
|
||||
|
||||
The `S3LogConfig` interface provides the configuration needed for log uploading:
|
||||
|
||||
```java
|
||||
public interface S3LogConfig {
|
||||
boolean isEnabled(); // Whether S3 upload is enabled
|
||||
String clusterId(); // Cluster identifier
|
||||
int nodeId(); // Node identifier
|
||||
ObjectStorage objectStorage(); // S3 object storage instance
|
||||
boolean isLeader(); // Whether this node should upload logs
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
The upload schedule can be overridden by environment variables:
|
||||
|
||||
- `AUTOMQ_OBSERVABILITY_UPLOAD_INTERVAL`: Maximum upload interval (milliseconds).
|
||||
- `AUTOMQ_OBSERVABILITY_CLEANUP_INTERVAL`: Retention period (milliseconds); objects older than this are cleaned up.
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
### Leader Selection
|
||||
|
||||
The log uploader relies on the `S3LogConfig.isLeader()` method to determine whether the current node should upload logs and perform cleanup tasks. This avoids multiple nodes in a cluster simultaneously executing these operations.
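As a minimal sketch (the `BooleanSupplier` wiring and the election mechanism are assumptions, not part of this module), `isLeader()` can simply delegate to whatever election signal the host process already has, so that leadership changes are picked up on every call:

```java
import java.util.function.BooleanSupplier;

// Hypothetical helper: delegate leadership to an external election mechanism,
// e.g. "is this node the active controller?". Only the isLeader() wiring is shown;
// the remaining S3LogConfig methods are covered in the Usage Example below.
public final class DelegatingLeaderCheck {
    private final BooleanSupplier electionSignal;

    public DelegatingLeaderCheck(BooleanSupplier electionSignal) {
        this.electionSignal = electionSignal;
    }

    public boolean isLeader() {
        // Re-evaluated on every call, so the uploader reacts to leadership changes.
        return electionSignal.getAsBoolean();
    }
}
```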
|
||||
|
||||
### Object Storage Path
|
||||
|
||||
Logs are uploaded to object storage following this path pattern:
|
||||
```
|
||||
automq/logs/{clusterId}/{nodeId}/{hour}/{uuid}
|
||||
```
|
||||
|
||||
Where:
|
||||
- `clusterId` and `nodeId` come from the S3LogConfig
|
||||
- `hour` is the timestamp hour for log organization
|
||||
- `uuid` is a unique identifier for each log batch
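For illustration only, a key following this pattern could be assembled as below; the exact hour encoding used by `LogUploader` is an implementation detail, and the UTC epoch-hour bucket shown here is an assumption:

```java
import java.time.Instant;
import java.util.UUID;

// Hypothetical helper, not part of the module: builds a key in the documented layout.
public final class LogObjectKeys {
    private LogObjectKeys() {
    }

    public static String build(String clusterId, int nodeId, Instant timestamp) {
        long hourBucket = timestamp.getEpochSecond() / 3600; // hour-granularity bucket (assumption)
        return String.format("automq/logs/%s/%d/%d/%s", clusterId, nodeId, hourBucket, UUID.randomUUID());
    }
}
```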
|
||||
|
||||
## Usage Example
|
||||
|
||||
Complete example of using the log uploader:
|
||||
|
||||
```java
|
||||
import com.automq.log.S3RollingFileAppender;
|
||||
import com.automq.log.uploader.S3LogConfig;
|
||||
import com.automq.stream.s3.operator.ObjectStorage;
|
||||
|
||||
// Implement S3LogConfig
|
||||
public class MyS3LogConfig implements S3LogConfig {
|
||||
@Override
|
||||
public boolean isEnabled() {
|
||||
return true; // Enable S3 upload
|
||||
}
|
||||
|
||||
@Override
|
||||
public String clusterId() {
|
||||
return "my-cluster";
|
||||
}
|
||||
|
||||
@Override
|
||||
public int nodeId() {
|
||||
return 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ObjectStorage objectStorage() {
|
||||
// Return your ObjectStorage instance
|
||||
return myObjectStorage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isLeader() {
|
||||
// Return true if this node should upload logs
|
||||
return isCurrentNodeLeader();
|
||||
}
|
||||
}
|
||||
|
||||
// Setup and use
|
||||
S3LogConfig config = new MyS3LogConfig();
|
||||
S3RollingFileAppender.setup(config);
|
||||
|
||||
// Configure Log4j to use the appender
|
||||
// The appender will now automatically upload logs to S3
|
||||
```
|
||||
|
||||
## Lifecycle Management
|
||||
|
||||
Remember to properly shutdown the log uploader when your application terminates:
|
||||
|
||||
```java
|
||||
// During application shutdown
|
||||
S3RollingFileAppender.shutdown();
|
||||
```
|
||||
|
|
@ -1,105 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.automq.log;
|
||||
|
||||
import com.automq.log.uploader.LogRecorder;
|
||||
import com.automq.log.uploader.LogUploader;
|
||||
import com.automq.log.uploader.S3LogConfig;
|
||||
|
||||
import org.apache.log4j.RollingFileAppender;
|
||||
import org.apache.log4j.spi.LoggingEvent;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class S3RollingFileAppender extends RollingFileAppender {
|
||||
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(S3RollingFileAppender.class);
|
||||
private static final Object INIT_LOCK = new Object();
|
||||
|
||||
private static volatile LogUploader logUploaderInstance;
|
||||
private static volatile S3LogConfig s3LogConfig;
|
||||
|
||||
public S3RollingFileAppender() {
|
||||
super();
|
||||
}
|
||||
|
||||
public static void setup(S3LogConfig config) {
|
||||
s3LogConfig = config;
|
||||
synchronized (INIT_LOCK) {
|
||||
if (logUploaderInstance != null) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
if (s3LogConfig == null) {
|
||||
LOGGER.error("No s3LogConfig available; S3 log upload remains disabled.");
|
||||
throw new RuntimeException("S3 log configuration is missing.");
|
||||
}
|
||||
if (!s3LogConfig.isEnabled() || s3LogConfig.objectStorage() == null) {
|
||||
LOGGER.warn("S3 log upload is disabled by configuration.");
|
||||
return;
|
||||
}
|
||||
|
||||
LogUploader uploader = new LogUploader();
|
||||
uploader.start(s3LogConfig);
|
||||
logUploaderInstance = uploader;
|
||||
LOGGER.info("S3RollingFileAppender initialized successfully using s3LogConfig {}.", s3LogConfig.getClass().getName());
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to initialize S3RollingFileAppender", e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static void shutdown() {
|
||||
if (logUploaderInstance != null) {
|
||||
synchronized (INIT_LOCK) {
|
||||
if (logUploaderInstance != null) {
|
||||
try {
|
||||
logUploaderInstance.close();
|
||||
logUploaderInstance = null;
|
||||
LOGGER.info("S3RollingFileAppender log uploader closed successfully.");
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to close S3RollingFileAppender log uploader", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void subAppend(LoggingEvent event) {
|
||||
super.subAppend(event);
|
||||
if (!closed && logUploaderInstance != null) {
|
||||
LogRecorder.LogEvent logEvent = new LogRecorder.LogEvent(
|
||||
event.getTimeStamp(),
|
||||
event.getLevel().toString(),
|
||||
event.getLoggerName(),
|
||||
event.getRenderedMessage(),
|
||||
event.getThrowableStrRep());
|
||||
|
||||
try {
|
||||
logEvent.validate();
|
||||
logUploaderInstance.append(logEvent);
|
||||
} catch (IllegalArgumentException e) {
|
||||
errorHandler.error("Failed to validate and append log event", e, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,69 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.automq.log.uploader.util;
|
||||
|
||||
import com.automq.stream.s3.ByteBufAlloc;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.zip.GZIPInputStream;
|
||||
import java.util.zip.GZIPOutputStream;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
|
||||
public class Utils {
|
||||
|
||||
private Utils() {
|
||||
}
|
||||
|
||||
public static ByteBuf compress(ByteBuf input) throws IOException {
|
||||
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
|
||||
try (GZIPOutputStream gzipOutputStream = new GZIPOutputStream(byteArrayOutputStream)) {
|
||||
byte[] buffer = new byte[input.readableBytes()];
|
||||
input.readBytes(buffer);
|
||||
gzipOutputStream.write(buffer);
|
||||
}
|
||||
|
||||
ByteBuf compressed = ByteBufAlloc.byteBuffer(byteArrayOutputStream.size());
|
||||
compressed.writeBytes(byteArrayOutputStream.toByteArray());
|
||||
return compressed;
|
||||
}
|
||||
|
||||
public static ByteBuf decompress(ByteBuf input) throws IOException {
|
||||
byte[] compressedData = new byte[input.readableBytes()];
|
||||
input.readBytes(compressedData);
|
||||
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(compressedData);
|
||||
|
||||
try (GZIPInputStream gzipInputStream = new GZIPInputStream(byteArrayInputStream);
|
||||
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) {
|
||||
byte[] buffer = new byte[1024];
|
||||
int bytesRead;
|
||||
while ((bytesRead = gzipInputStream.read(buffer)) != -1) {
|
||||
byteArrayOutputStream.write(buffer, 0, bytesRead);
|
||||
}
|
||||
|
||||
byte[] uncompressedData = byteArrayOutputStream.toByteArray();
|
||||
ByteBuf output = ByteBufAlloc.byteBuffer(uncompressedData.length);
|
||||
output.writeBytes(uncompressedData);
|
||||
return output;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,459 +0,0 @@
|
|||
# AutoMQ automq-metrics Module
|
||||
|
||||
## Module Structure
|
||||
|
||||
```
|
||||
com.automq.opentelemetry/
|
||||
├── AutoMQTelemetryManager.java # Main management class for initialization and lifecycle
|
||||
├── TelemetryConstants.java # Constants definition
|
||||
├── common/
|
||||
│ ├── OTLPCompressionType.java # OTLP compression types
|
||||
│ └── OTLPProtocol.java # OTLP protocol types
|
||||
├── exporter/
|
||||
│ ├── MetricsExporter.java # Exporter interface
|
||||
│ ├── MetricsExportConfig.java # Export configuration
|
||||
│ ├── MetricsExporterProvider.java # Exporter factory provider
|
||||
│ ├── MetricsExporterType.java # Exporter type enumeration
|
||||
│ ├── MetricsExporterURI.java # URI parser for exporters
|
||||
│ ├── OTLPMetricsExporter.java # OTLP exporter implementation
|
||||
│ ├── PrometheusMetricsExporter.java # Prometheus exporter implementation
|
||||
│ └── s3/ # S3 metrics exporter implementation
|
||||
│ ├── CompressionUtils.java # Utility for data compression
|
||||
│ ├── PrometheusUtils.java # Utilities for Prometheus format
|
||||
│ ├── S3MetricsExporter.java # S3 metrics exporter implementation
|
||||
│ └── S3MetricsExporterAdapter.java # Adapter to handle S3 metrics export
|
||||
└── yammer/
|
||||
├── DeltaHistogram.java # Delta histogram implementation
|
||||
├── OTelMetricUtils.java # OpenTelemetry metrics utilities
|
||||
├── YammerMetricsProcessor.java # Yammer metrics processor
|
||||
└── YammerMetricsReporter.java # Yammer metrics reporter
|
||||
```
|
||||
|
||||
The AutoMQ OpenTelemetry module is a telemetry data collection and export component based on OpenTelemetry SDK, specifically designed for AutoMQ Kafka. This module provides unified telemetry data management capabilities, supporting the collection of JVM metrics, JMX metrics, and Yammer metrics, and can export data to Prometheus, OTLP-compatible backend systems, or S3-compatible storage.
|
||||
|
||||
## Core Features
|
||||
|
||||
### 1. Metrics Collection
|
||||
- **JVM Metrics**: Automatically collect JVM runtime metrics including CPU, memory pools, garbage collection, threads, etc.
|
||||
- **JMX Metrics**: Define and collect JMX Bean metrics through configuration files
|
||||
- **Yammer Metrics**: Bridge existing Kafka Yammer metrics system to OpenTelemetry
|
||||
|
||||
### 2. Multiple Exporter Support
|
||||
- **Prometheus**: Expose metrics in Prometheus format through HTTP server
|
||||
- **OTLP**: Support both gRPC and HTTP/Protobuf protocols for exporting to OTLP backends
|
||||
- **S3**: Export metrics to S3-compatible object storage systems
|
||||
|
||||
### 3. Flexible Configuration
|
||||
- Support parameter settings through Properties configuration files
|
||||
- Configurable export intervals, compression methods, timeout values, etc.
|
||||
- Support metric cardinality limits to control memory usage
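For instance, the cardinality limit can be applied before the meter provider is built; the sketch below uses the public constructor rather than the singleton helper, and `config` stands for a `MetricsExportConfig` implementation like the one shown in Quick Start:

```java
// Sketch: setMetricCardinalityLimit() must run before init(), because the limit is
// consumed while the SDK meter provider is being built.
AutoMQTelemetryManager manager = new AutoMQTelemetryManager(
    "prometheus://localhost:9090", "automq-kafka", "broker-1", config);
manager.setMetricCardinalityLimit(10000); // cap the number of series per instrument
manager.init();
```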
|
||||
|
||||
## Module Structure
|
||||
|
||||
```
|
||||
com.automq.opentelemetry/
|
||||
├── AutoMQTelemetryManager.java # Main management class for initialization and lifecycle
|
||||
├── TelemetryConfig.java # Configuration management class
|
||||
├── TelemetryConstants.java # Constants definition
|
||||
├── common/
|
||||
│ └── MetricsUtils.java # Metrics utility class
|
||||
├── exporter/
|
||||
│ ├── MetricsExporter.java # Exporter interface
|
||||
│ ├── MetricsExporterURI.java # URI parser
|
||||
│   ├── OTLPMetricsExporter.java           # OTLP exporter implementation
|
||||
│ ├── PrometheusMetricsExporter.java # Prometheus exporter implementation
|
||||
│ └── s3/ # S3 metrics exporter implementation
|
||||
│ ├── CompressionUtils.java # Utility for data compression
|
||||
│ ├── PrometheusUtils.java # Utilities for Prometheus format
|
||||
│ ├── S3MetricsConfig.java # Configuration interface
|
||||
│ ├── S3MetricsExporter.java # S3 metrics exporter implementation
|
||||
│ ├── S3MetricsExporterAdapter.java # Adapter to handle S3 metrics export
|
||||
│ ├── LeaderNodeSelector.java # Interface for node selection logic
|
||||
│ └── LeaderNodeSelectors.java # Factory for node selector implementations
|
||||
└── yammer/
|
||||
├── DeltaHistogram.java # Delta histogram implementation
|
||||
├── OTelMetricUtils.java # OpenTelemetry metrics utilities
|
||||
├── YammerMetricsProcessor.java # Yammer metrics processor
|
||||
└── YammerMetricsReporter.java # Yammer metrics reporter
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Basic Usage
|
||||
|
||||
```java
|
||||
import com.automq.opentelemetry.AutoMQTelemetryManager;
|
||||
import com.automq.opentelemetry.exporter.MetricsExportConfig;
|
||||
|
||||
// Implement MetricsExportConfig
|
||||
public class MyMetricsExportConfig implements MetricsExportConfig {
|
||||
@Override
|
||||
public String clusterId() { return "my-cluster"; }
|
||||
|
||||
@Override
|
||||
public boolean isLeader() { return true; }
|
||||
|
||||
@Override
|
||||
public int nodeId() { return 1; }
|
||||
|
||||
@Override
|
||||
public ObjectStorage objectStorage() {
|
||||
// Return your object storage instance for S3 exports
|
||||
return myObjectStorage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Pair<String, String>> baseLabels() {
|
||||
return Arrays.asList(
|
||||
Pair.of("environment", "production"),
|
||||
Pair.of("region", "us-east-1")
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int intervalMs() { return 60000; } // 60 seconds
|
||||
}
|
||||
|
||||
// Create export configuration
|
||||
MetricsExportConfig config = new MyMetricsExportConfig();
|
||||
|
||||
// Initialize telemetry manager singleton
|
||||
AutoMQTelemetryManager manager = AutoMQTelemetryManager.initializeInstance(
|
||||
"prometheus://localhost:9090", // exporter URI
|
||||
"automq-kafka", // service name
|
||||
"broker-1", // instance ID
|
||||
config // export config
|
||||
);
|
||||
|
||||
// Start Yammer metrics reporting (optional)
|
||||
MetricsRegistry yammerRegistry = // Get Kafka's Yammer registry
|
||||
manager.startYammerMetricsReporter(yammerRegistry);
|
||||
|
||||
// Application running...
|
||||
|
||||
// Shutdown telemetry system
|
||||
AutoMQTelemetryManager.shutdownInstance();
|
||||
```
|
||||
|
||||
### 2. Get Meter Instance
|
||||
|
||||
```java
|
||||
// Get the singleton instance
|
||||
AutoMQTelemetryManager manager = AutoMQTelemetryManager.getInstance();
|
||||
|
||||
// Get Meter for custom metrics
|
||||
Meter meter = manager.getMeter();
|
||||
|
||||
// Create custom metrics
|
||||
LongCounter requestCounter = meter
|
||||
.counterBuilder("http_requests_total")
|
||||
.setDescription("Total number of HTTP requests")
|
||||
.build();
|
||||
|
||||
requestCounter.add(1, Attributes.of(AttributeKey.stringKey("method"), "GET"));
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Basic Configuration
|
||||
|
||||
Configuration is provided through the `MetricsExportConfig` interface and constructor parameters:
|
||||
|
||||
| Parameter | Description | Example |
|
||||
|-----------|-------------|---------|
|
||||
| `exporterUri` | Metrics exporter URI | `prometheus://localhost:9090` |
|
||||
| `serviceName` | Service name for telemetry | `automq-kafka` |
|
||||
| `instanceId` | Unique service instance ID | `broker-1` |
|
||||
| `config` | MetricsExportConfig implementation | See example above |
|
||||
|
||||
### Exporter Configuration
|
||||
|
||||
All configuration is done through the `MetricsExportConfig` interface and constructor parameters. Export intervals, compression settings, and other options are controlled through:
|
||||
|
||||
1. **Exporter URI**: Determines the export destination and protocol
|
||||
2. **MetricsExportConfig**: Provides cluster information, intervals, and base labels
|
||||
3. **Constructor parameters**: Service name and instance ID
|
||||
|
||||
#### Prometheus Exporter
|
||||
```java
|
||||
// Use prometheus:// URI scheme
|
||||
AutoMQTelemetryManager manager = AutoMQTelemetryManager.initializeInstance(
|
||||
"prometheus://localhost:9090",
|
||||
"automq-kafka",
|
||||
"broker-1",
|
||||
config
|
||||
);
|
||||
```
|
||||
|
||||
#### OTLP Exporter
|
||||
```java
|
||||
// Use otlp:// URI scheme with optional query parameters
|
||||
AutoMQTelemetryManager manager = AutoMQTelemetryManager.initializeInstance(
|
||||
"otlp://localhost:4317?protocol=grpc&compression=gzip&timeout=30000",
|
||||
"automq-kafka",
|
||||
"broker-1",
|
||||
config
|
||||
);
|
||||
```
|
||||
|
||||
#### S3 Metrics Exporter
|
||||
```java
|
||||
// Use s3:// URI scheme
|
||||
AutoMQTelemetryManager manager = AutoMQTelemetryManager.initializeInstance(
|
||||
"s3://access-key:secret-key@my-bucket.s3.amazonaws.com",
|
||||
"automq-kafka",
|
||||
"broker-1",
|
||||
config // config.clusterId(), nodeId(), isLeader() used for S3 export
|
||||
);
|
||||
```
|
||||
|
||||
Example usage with S3 exporter:
|
||||
|
||||
```java
|
||||
// Implementation for S3 export configuration
|
||||
public class S3MetricsExportConfig implements MetricsExportConfig {
|
||||
private final ObjectStorage objectStorage;
|
||||
|
||||
public S3MetricsExportConfig(ObjectStorage objectStorage) {
|
||||
this.objectStorage = objectStorage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String clusterId() { return "my-kafka-cluster"; }
|
||||
|
||||
@Override
|
||||
public boolean isLeader() {
|
||||
// Only one node in the cluster should return true
|
||||
return isCurrentNodeLeader();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int nodeId() { return 1; }
|
||||
|
||||
@Override
|
||||
public ObjectStorage objectStorage() { return objectStorage; }
|
||||
|
||||
@Override
|
||||
public List<Pair<String, String>> baseLabels() {
|
||||
return Arrays.asList(Pair.of("environment", "production"));
|
||||
}
|
||||
|
||||
@Override
|
||||
public int intervalMs() { return 60000; }
|
||||
}
|
||||
|
||||
// Initialize telemetry manager with S3 export
|
||||
ObjectStorage objectStorage = // Create your object storage instance
|
||||
MetricsExportConfig config = new S3MetricsExportConfig(objectStorage);
|
||||
|
||||
AutoMQTelemetryManager manager = AutoMQTelemetryManager.initializeInstance(
|
||||
"s3://access-key:secret-key@my-bucket.s3.amazonaws.com",
|
||||
"automq-kafka",
|
||||
"broker-1",
|
||||
config
|
||||
);
|
||||
|
||||
// Application running...
|
||||
|
||||
// Shutdown telemetry system
|
||||
AutoMQTelemetryManager.shutdownInstance();
|
||||
```
|
||||
|
||||
### JMX Metrics Configuration
|
||||
|
||||
Define JMX metrics collection rules through YAML configuration files:
|
||||
|
||||
```java
|
||||
AutoMQTelemetryManager manager = AutoMQTelemetryManager.initializeInstance(
|
||||
exporterUri, serviceName, instanceId, config
|
||||
);
|
||||
|
||||
// Set JMX config paths after initialization
|
||||
manager.setJmxConfigPaths("/jmx-config.yaml,/kafka-jmx.yaml");
|
||||
```
|
||||
|
||||
#### Configuration File Requirements
|
||||
|
||||
1. **Directory Requirements**:
|
||||
- Configuration files must be placed in the project's classpath (e.g., `src/main/resources` directory)
|
||||
- Support subdirectory structure, e.g., `/config/jmx-metrics.yaml`
|
||||
|
||||
2. **Path Format**:
|
||||
- Paths must start with `/` to indicate starting from classpath root
|
||||
- Multiple configuration files separated by commas
|
||||
|
||||
3. **File Format**:
|
||||
- Use YAML format (`.yaml` or `.yml` extension)
|
||||
- Filenames can be customized, meaningful names are recommended
|
||||
|
||||
#### Recommended Directory Structure
|
||||
|
||||
```
|
||||
src/main/resources/
|
||||
├── jmx-kafka-broker.yaml # Kafka Broker metrics configuration
|
||||
├── jmx-kafka-consumer.yaml # Kafka Consumer metrics configuration
|
||||
├── jmx-kafka-producer.yaml # Kafka Producer metrics configuration
|
||||
└── config/
|
||||
├── custom-jmx.yaml # Custom JMX metrics configuration
|
||||
└── third-party-jmx.yaml # Third-party component JMX configuration
|
||||
```
|
||||
|
||||
JMX configuration file example (`jmx-config.yaml`):
|
||||
```yaml
|
||||
rules:
|
||||
- bean: kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec
|
||||
metricAttribute:
|
||||
name: kafka_server_broker_topic_messages_in_per_sec
|
||||
description: Messages in per second
|
||||
unit: "1/s"
|
||||
attributes:
|
||||
- name: topic
|
||||
value: topic
|
||||
```
|
||||
|
||||
## Supported Metric Types
|
||||
|
||||
### 1. JVM Metrics
|
||||
- Memory usage (heap memory, non-heap memory, memory pools)
|
||||
- CPU usage
|
||||
- Garbage collection statistics
|
||||
- Thread states
|
||||
|
||||
### 2. Kafka Metrics
|
||||
Through Yammer metrics bridging, supports the following types of Kafka metrics:
|
||||
- `BytesInPerSec` - Bytes input per second
|
||||
- `BytesOutPerSec` - Bytes output per second
|
||||
- `Size` - Log size (for identifying idle partitions)
|
||||
|
||||
### 3. Custom Metrics
|
||||
Support creating custom metrics through OpenTelemetry API:
|
||||
- Counter
|
||||
- Gauge
|
||||
- Histogram
|
||||
- UpDownCounter
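As a sketch (metric names, units, and attributes below are illustrative rather than part of the module), a histogram and an observable gauge can be created from the same `Meter`:

```java
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.DoubleHistogram;
import io.opentelemetry.api.metrics.Meter;

Meter meter = AutoMQTelemetryManager.getInstance().getMeter();

// Histogram: record a latency sample tagged with an attribute.
DoubleHistogram latency = meter.histogramBuilder("request_latency_ms")
    .setDescription("Request latency in milliseconds")
    .setUnit("ms")
    .build();
latency.record(12.5, Attributes.of(AttributeKey.stringKey("api"), "produce"));

// Observable gauge: sampled on each export; queueDepth() is a placeholder supplier.
meter.gaugeBuilder("queue_depth")
    .setDescription("Current depth of an internal queue")
    .buildWithCallback(measurement -> measurement.record(queueDepth()));
```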
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Production Environment Configuration
|
||||
|
||||
```java
|
||||
public class ProductionMetricsConfig implements MetricsExportConfig {
|
||||
@Override
|
||||
public String clusterId() { return "production-cluster"; }
|
||||
|
||||
@Override
|
||||
public boolean isLeader() {
|
||||
// Implement your leader election logic
|
||||
return isCurrentNodeController();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int nodeId() { return getCurrentNodeId(); }
|
||||
|
||||
@Override
|
||||
public ObjectStorage objectStorage() {
|
||||
return productionObjectStorage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Pair<String, String>> baseLabels() {
|
||||
return Arrays.asList(
|
||||
Pair.of("environment", "production"),
|
||||
Pair.of("region", System.getenv("AWS_REGION")),
|
||||
Pair.of("version", getApplicationVersion())
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int intervalMs() { return 60000; } // 1 minute
|
||||
}
|
||||
|
||||
// Initialize for production
|
||||
AutoMQTelemetryManager manager = AutoMQTelemetryManager.initializeInstance(
|
||||
"prometheus://0.0.0.0:9090", // Or S3 URI for object storage export
|
||||
"automq-kafka",
|
||||
System.getenv("HOSTNAME"),
|
||||
new ProductionMetricsConfig()
|
||||
);
|
||||
```
|
||||
|
||||
### 2. Development Environment Configuration
|
||||
|
||||
```java
|
||||
public class DevelopmentMetricsConfig implements MetricsExportConfig {
|
||||
@Override
|
||||
public String clusterId() { return "dev-cluster"; }
|
||||
|
||||
@Override
|
||||
public boolean isLeader() { return true; } // Single node in dev
|
||||
|
||||
@Override
|
||||
public int nodeId() { return 1; }
|
||||
|
||||
@Override
|
||||
public ObjectStorage objectStorage() { return null; } // Not needed for OTLP
|
||||
|
||||
@Override
|
||||
public List<Pair<String, String>> baseLabels() {
|
||||
return Arrays.asList(Pair.of("environment", "development"));
|
||||
}
|
||||
|
||||
@Override
|
||||
public int intervalMs() { return 10000; } // 10 seconds for faster feedback
|
||||
}
|
||||
|
||||
// Initialize for development
|
||||
AutoMQTelemetryManager manager = AutoMQTelemetryManager.initializeInstance(
|
||||
"otlp://localhost:4317",
|
||||
"automq-kafka-dev",
|
||||
"local-dev",
|
||||
new DevelopmentMetricsConfig()
|
||||
);
|
||||
```
|
||||
|
||||
### 3. Resource Management
|
||||
- Set appropriate metric cardinality limits to avoid memory leaks
|
||||
- Call the `shutdown()` method when the application closes to release resources (see the sketch after this list)
|
||||
- Monitor exporter health status
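A simple way to guarantee the shutdown call is a JVM shutdown hook; the hook itself is an assumption (the module does not install one for you):

```java
// Sketch: release telemetry resources when the JVM exits.
Runtime.getRuntime().addShutdownHook(
    new Thread(AutoMQTelemetryManager::shutdownInstance, "automq-telemetry-shutdown"));
```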
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Metrics not exported**
|
||||
- Check if exporter URI passed to `initializeInstance()` is correct
|
||||
- Verify target endpoint is reachable
|
||||
- Check error messages in logs
|
||||
- Ensure `MetricsExportConfig.intervalMs()` returns reasonable value
|
||||
|
||||
2. **JMX metrics missing**
|
||||
- Confirm JMX configuration file path set via `setJmxConfigPaths()` is correct
|
||||
- Check YAML configuration file format
|
||||
- Verify JMX Bean exists
|
||||
- Ensure files are in classpath
|
||||
|
||||
3. **High memory usage**
|
||||
- Implement cardinality limits in your `MetricsExportConfig`
|
||||
- Check for high cardinality labels in `baseLabels()`
|
||||
- Consider increasing export interval via `intervalMs()`
|
||||
|
||||
### Logging Configuration
|
||||
|
||||
Enable debug logging for more information using your logging framework configuration (e.g., logback.xml, log4j2.xml):
|
||||
|
||||
```xml
|
||||
<!-- For Logback -->
|
||||
<logger name="com.automq.opentelemetry" level="DEBUG" />
|
||||
<logger name="io.opentelemetry" level="INFO" />
|
||||
```
|
||||
|
||||
## Dependencies
|
||||
|
||||
- Java 8+
|
||||
- OpenTelemetry SDK 1.30+
|
||||
- Apache Commons Lang3
|
||||
- SLF4J logging framework
|
||||
|
||||
## License
|
||||
|
||||
This module is open source under the Apache License 2.0.
|
||||
|
|
@ -1,330 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.automq.opentelemetry;
|
||||
|
||||
import com.automq.opentelemetry.exporter.MetricsExportConfig;
|
||||
import com.automq.opentelemetry.exporter.MetricsExporter;
|
||||
import com.automq.opentelemetry.exporter.MetricsExporterURI;
|
||||
import com.automq.opentelemetry.yammer.YammerMetricsReporter;
|
||||
import com.yammer.metrics.core.MetricsRegistry;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.slf4j.bridge.SLF4JBridgeHandler;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import io.opentelemetry.api.OpenTelemetry;
|
||||
import io.opentelemetry.api.baggage.propagation.W3CBaggagePropagator;
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.common.AttributesBuilder;
|
||||
import io.opentelemetry.api.metrics.Meter;
|
||||
import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator;
|
||||
import io.opentelemetry.context.propagation.ContextPropagators;
|
||||
import io.opentelemetry.context.propagation.TextMapPropagator;
|
||||
import io.opentelemetry.instrumentation.jmx.engine.JmxMetricInsight;
|
||||
import io.opentelemetry.instrumentation.jmx.engine.MetricConfiguration;
|
||||
import io.opentelemetry.instrumentation.jmx.yaml.RuleParser;
|
||||
import io.opentelemetry.instrumentation.runtimemetrics.java8.Cpu;
|
||||
import io.opentelemetry.instrumentation.runtimemetrics.java8.GarbageCollector;
|
||||
import io.opentelemetry.instrumentation.runtimemetrics.java8.MemoryPools;
|
||||
import io.opentelemetry.instrumentation.runtimemetrics.java8.Threads;
|
||||
import io.opentelemetry.sdk.OpenTelemetrySdk;
|
||||
import io.opentelemetry.sdk.metrics.SdkMeterProvider;
|
||||
import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
|
||||
import io.opentelemetry.sdk.metrics.export.MetricReader;
|
||||
import io.opentelemetry.sdk.metrics.internal.SdkMeterProviderUtil;
|
||||
import io.opentelemetry.sdk.resources.Resource;
|
||||
|
||||
/**
|
||||
* The main manager for AutoMQ telemetry.
|
||||
* This class is responsible for initializing, configuring, and managing the lifecycle of all
|
||||
* telemetry components, including the OpenTelemetry SDK, metric exporters, and various metric sources.
|
||||
*/
|
||||
public class AutoMQTelemetryManager {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(AutoMQTelemetryManager.class);
|
||||
|
||||
// Singleton instance support
|
||||
private static volatile AutoMQTelemetryManager instance;
|
||||
private static final Object LOCK = new Object();
|
||||
|
||||
private final String exporterUri;
|
||||
private final String serviceName;
|
||||
private final String instanceId;
|
||||
private final MetricsExportConfig metricsExportConfig;
|
||||
private final List<MetricReader> metricReaders = new ArrayList<>();
|
||||
private final List<AutoCloseable> autoCloseableList;
|
||||
private OpenTelemetrySdk openTelemetrySdk;
|
||||
private YammerMetricsReporter yammerReporter;
|
||||
|
||||
private int metricCardinalityLimit = TelemetryConstants.DEFAULT_METRIC_CARDINALITY_LIMIT;
|
||||
private String jmxConfigPath;
|
||||
|
||||
/**
|
||||
* Constructs a new Telemetry Manager with the given configuration.
|
||||
*
|
||||
* @param exporterUri The metrics exporter URI.
|
||||
* @param serviceName The service name to be used in telemetry data.
|
||||
* @param instanceId The unique instance ID for this service instance.
|
||||
* @param metricsExportConfig The metrics configuration.
|
||||
*/
|
||||
public AutoMQTelemetryManager(String exporterUri, String serviceName, String instanceId, MetricsExportConfig metricsExportConfig) {
|
||||
this.exporterUri = exporterUri;
|
||||
this.serviceName = serviceName;
|
||||
this.instanceId = instanceId;
|
||||
this.metricsExportConfig = metricsExportConfig;
|
||||
this.autoCloseableList = new ArrayList<>();
|
||||
// Redirect JUL from OpenTelemetry SDK to SLF4J for unified logging
|
||||
SLF4JBridgeHandler.removeHandlersForRootLogger();
|
||||
SLF4JBridgeHandler.install();
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the singleton instance of AutoMQTelemetryManager.
|
||||
* Returns null if no instance has been initialized.
|
||||
*
|
||||
* @return the singleton instance, or null if not initialized
|
||||
*/
|
||||
public static AutoMQTelemetryManager getInstance() {
|
||||
return instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes the singleton instance with the given configuration.
|
||||
* This method should be called before any other components try to access the instance.
|
||||
*
|
||||
* @param exporterUri The metrics exporter URI.
|
||||
* @param serviceName The service name to be used in telemetry data.
|
||||
* @param instanceId The unique instance ID for this service instance.
|
||||
* @param metricsExportConfig The metrics configuration.
|
||||
* @return the initialized singleton instance
|
||||
*/
|
||||
public static AutoMQTelemetryManager initializeInstance(String exporterUri, String serviceName, String instanceId, MetricsExportConfig metricsExportConfig) {
|
||||
if (instance == null) {
|
||||
synchronized (LOCK) {
|
||||
if (instance == null) {
|
||||
AutoMQTelemetryManager newInstance = new AutoMQTelemetryManager(exporterUri, serviceName, instanceId, metricsExportConfig);
|
||||
newInstance.init();
|
||||
instance = newInstance;
|
||||
LOGGER.info("AutoMQTelemetryManager singleton instance initialized");
|
||||
}
|
||||
}
|
||||
}
|
||||
return instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Shuts down the singleton instance and releases all resources.
|
||||
*/
|
||||
public static void shutdownInstance() {
|
||||
if (instance != null) {
|
||||
synchronized (LOCK) {
|
||||
if (instance != null) {
|
||||
instance.shutdown();
|
||||
instance = null;
|
||||
LOGGER.info("AutoMQTelemetryManager singleton instance shutdown");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes the telemetry system. This method sets up the OpenTelemetry SDK,
|
||||
* configures exporters, and registers JVM and JMX metrics.
|
||||
*/
|
||||
public void init() {
|
||||
SdkMeterProvider meterProvider = buildMeterProvider();
|
||||
|
||||
this.openTelemetrySdk = OpenTelemetrySdk.builder()
|
||||
.setMeterProvider(meterProvider)
|
||||
.setPropagators(ContextPropagators.create(TextMapPropagator.composite(
|
||||
W3CTraceContextPropagator.getInstance(), W3CBaggagePropagator.getInstance())))
|
||||
.buildAndRegisterGlobal();
|
||||
|
||||
// Register JVM and JMX metrics
|
||||
registerJvmMetrics(openTelemetrySdk);
|
||||
registerJmxMetrics(openTelemetrySdk);
|
||||
|
||||
LOGGER.info("AutoMQ Telemetry Manager initialized successfully.");
|
||||
}
|
||||
|
||||
private SdkMeterProvider buildMeterProvider() {
|
||||
String hostName;
|
||||
try {
|
||||
hostName = InetAddress.getLocalHost().getHostName();
|
||||
} catch (UnknownHostException e) {
|
||||
hostName = "unknown-host";
|
||||
}
|
||||
AttributesBuilder attrsBuilder = Attributes.builder()
|
||||
.put(TelemetryConstants.SERVICE_NAME_KEY, serviceName)
|
||||
.put(TelemetryConstants.SERVICE_INSTANCE_ID_KEY, instanceId)
|
||||
.put(TelemetryConstants.HOST_NAME_KEY, hostName)
|
||||
// Add attributes for Prometheus compatibility
|
||||
.put(TelemetryConstants.PROMETHEUS_JOB_KEY, serviceName)
|
||||
.put(TelemetryConstants.PROMETHEUS_INSTANCE_KEY, instanceId);
|
||||
|
||||
for (Pair<String, String> label : metricsExportConfig.baseLabels()) {
|
||||
attrsBuilder.put(label.getKey(), label.getValue());
|
||||
}
|
||||
|
||||
Resource resource = Resource.getDefault().merge(Resource.create(attrsBuilder.build()));
|
||||
SdkMeterProviderBuilder meterProviderBuilder = SdkMeterProvider.builder().setResource(resource);
|
||||
|
||||
// Configure exporters from URI
|
||||
MetricsExporterURI exporterURI = buildMetricsExporterURI(exporterUri, metricsExportConfig);
|
||||
for (MetricsExporter exporter : exporterURI.getMetricsExporters()) {
|
||||
MetricReader reader = exporter.asMetricReader();
|
||||
metricReaders.add(reader);
|
||||
SdkMeterProviderUtil.registerMetricReaderWithCardinalitySelector(meterProviderBuilder, reader,
|
||||
instrumentType -> metricCardinalityLimit);
|
||||
}
|
||||
|
||||
return meterProviderBuilder.build();
|
||||
}
|
||||
|
||||
protected MetricsExporterURI buildMetricsExporterURI(String exporterUri, MetricsExportConfig metricsExportConfig) {
|
||||
return MetricsExporterURI.parse(exporterUri, metricsExportConfig);
|
||||
}
|
||||
|
||||
private void registerJvmMetrics(OpenTelemetry openTelemetry) {
|
||||
autoCloseableList.addAll(MemoryPools.registerObservers(openTelemetry));
|
||||
autoCloseableList.addAll(Cpu.registerObservers(openTelemetry));
|
||||
autoCloseableList.addAll(GarbageCollector.registerObservers(openTelemetry));
|
||||
autoCloseableList.addAll(Threads.registerObservers(openTelemetry));
|
||||
LOGGER.info("JVM metrics registered.");
|
||||
}
|
||||
|
||||
@SuppressWarnings({"NP_LOAD_OF_KNOWN_NULL_VALUE", "RCN_REDUNDANT_NULLCHECK_OF_NULL_VALUE"})
|
||||
private void registerJmxMetrics(OpenTelemetry openTelemetry) {
|
||||
List<String> jmxConfigPaths = getJmxConfigPaths();
|
||||
if (jmxConfigPaths.isEmpty()) {
|
||||
LOGGER.info("No JMX metric config paths provided, skipping JMX metrics registration.");
|
||||
return;
|
||||
}
|
||||
|
||||
JmxMetricInsight jmxMetricInsight = JmxMetricInsight.createService(openTelemetry, metricsExportConfig.intervalMs());
|
||||
MetricConfiguration metricConfig = new MetricConfiguration();
|
||||
|
||||
for (String path : jmxConfigPaths) {
|
||||
try (InputStream ins = this.getClass().getResourceAsStream(path)) {
|
||||
if (ins == null) {
|
||||
LOGGER.error("JMX config file not found in classpath: {}", path);
|
||||
continue;
|
||||
}
|
||||
RuleParser parser = RuleParser.get();
|
||||
parser.addMetricDefsTo(metricConfig, ins, path);
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to parse JMX config file: {}", path, e);
|
||||
}
|
||||
}
|
||||
|
||||
jmxMetricInsight.start(metricConfig);
|
||||
// JmxMetricInsight doesn't implement Closeable, but we can create a wrapper
|
||||
|
||||
LOGGER.info("JMX metrics registered with config paths: {}", jmxConfigPaths);
|
||||
}
|
||||
|
||||
public List<String> getJmxConfigPaths() {
|
||||
if (StringUtils.isEmpty(jmxConfigPath)) {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
return Stream.of(jmxConfigPath.split(","))
|
||||
.map(String::trim)
|
||||
.filter(s -> !s.isEmpty())
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
/**
|
||||
* Starts reporting metrics from a given Yammer MetricsRegistry.
|
||||
*
|
||||
* @param registry The Yammer registry to bridge metrics from.
|
||||
*/
|
||||
public void startYammerMetricsReporter(MetricsRegistry registry) {
|
||||
if (this.openTelemetrySdk == null) {
|
||||
throw new IllegalStateException("TelemetryManager is not initialized. Call init() first.");
|
||||
}
|
||||
if (registry == null) {
|
||||
LOGGER.warn("Yammer MetricsRegistry is null, skipping reporter start.");
|
||||
return;
|
||||
}
|
||||
this.yammerReporter = new YammerMetricsReporter(registry);
|
||||
this.yammerReporter.start(getMeter());
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
autoCloseableList.forEach(autoCloseable -> {
|
||||
try {
|
||||
autoCloseable.close();
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to close auto closeable", e);
|
||||
}
|
||||
});
|
||||
metricReaders.forEach(metricReader -> {
|
||||
metricReader.forceFlush();
|
||||
try {
|
||||
metricReader.close();
|
||||
} catch (IOException e) {
|
||||
LOGGER.error("Failed to close metric reader", e);
|
||||
}
|
||||
});
|
||||
if (openTelemetrySdk != null) {
|
||||
openTelemetrySdk.close();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* get YammerMetricsReporter instance.
|
||||
*
|
||||
* @return The YammerMetricsReporter instance.
|
||||
*/
|
||||
public YammerMetricsReporter getYammerReporter() {
|
||||
return this.yammerReporter;
|
||||
}
|
||||
|
||||
public void setMetricCardinalityLimit(int limit) {
|
||||
this.metricCardinalityLimit = limit;
|
||||
}
|
||||
|
||||
public void setJmxConfigPaths(String jmxConfigPaths) {
|
||||
this.jmxConfigPath = jmxConfigPaths;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the default meter from the initialized OpenTelemetry SDK.
|
||||
*
|
||||
* @return The meter instance.
|
||||
*/
|
||||
public Meter getMeter() {
|
||||
if (this.openTelemetrySdk == null) {
|
||||
throw new IllegalStateException("TelemetryManager is not initialized. Call init() first.");
|
||||
}
|
||||
return this.openTelemetrySdk.getMeter(TelemetryConstants.TELEMETRY_SCOPE_NAME);
|
||||
}
|
||||
}
|
||||
|
|
@ -1,54 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.automq.opentelemetry;
|
||||
|
||||
import io.opentelemetry.api.common.AttributeKey;
|
||||
|
||||
/**
|
||||
* Constants for telemetry, including configuration keys, attribute keys, and default values.
|
||||
*/
|
||||
public class TelemetryConstants {
|
||||
|
||||
//################################################################
|
||||
// Service and Resource Attributes
|
||||
//################################################################
|
||||
public static final String SERVICE_NAME_KEY = "service.name";
|
||||
public static final String SERVICE_INSTANCE_ID_KEY = "service.instance.id";
|
||||
public static final String HOST_NAME_KEY = "host.name";
|
||||
public static final String TELEMETRY_SCOPE_NAME = "automq_for_kafka";
|
||||
|
||||
/**
|
||||
* The cardinality limit for any single metric.
|
||||
*/
|
||||
public static final String METRIC_CARDINALITY_LIMIT_KEY = "automq.telemetry.metric.cardinality.limit";
|
||||
public static final int DEFAULT_METRIC_CARDINALITY_LIMIT = 20000;
|
||||
|
||||
//################################################################
|
||||
// Prometheus specific Attributes, for compatibility
|
||||
//################################################################
|
||||
public static final String PROMETHEUS_JOB_KEY = "job";
|
||||
public static final String PROMETHEUS_INSTANCE_KEY = "instance";
|
||||
|
||||
//################################################################
|
||||
// Custom Kafka-related Attribute Keys
|
||||
//################################################################
|
||||
public static final AttributeKey<Long> START_OFFSET_KEY = AttributeKey.longKey("startOffset");
|
||||
public static final AttributeKey<Long> END_OFFSET_KEY = AttributeKey.longKey("endOffset");
|
||||
}
|
||||
|
|
@@ -1,68 +0,0 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.automq.opentelemetry.exporter;

import com.automq.stream.s3.operator.ObjectStorage;

import org.apache.commons.lang3.tuple.Pair;

import java.util.List;

/**
 * Configuration interface for metrics exporter.
 */
public interface MetricsExportConfig {

    /**
     * Get the cluster ID.
     * @return The cluster ID.
     */
    String clusterId();

    /**
     * Check if the current node is a primary node for metrics upload.
     * @return True if the current node should upload metrics, false otherwise.
     */
    boolean isLeader();

    /**
     * Get the node ID.
     * @return The node ID.
     */
    int nodeId();

    /**
     * Get the object storage instance.
     * @return The object storage instance.
     */
    ObjectStorage objectStorage();

    /**
     * Get the base labels to include in all metrics.
     * @return The base labels.
     */
    List<Pair<String, String>> baseLabels();

    /**
     * Get the interval in milliseconds for metrics export.
     * @return The interval in milliseconds.
     */
    int intervalMs();
}
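For context, a minimal implementation sketch of the interface above. The class name, label values, and interval are illustrative assumptions, not part of this change; only an ObjectStorage instance from the surrounding AutoMQ code is assumed to exist.

// Illustrative sketch only; names and values are assumptions.
public class StaticMetricsExportConfig implements MetricsExportConfig {
    private final ObjectStorage storage;

    public StaticMetricsExportConfig(ObjectStorage storage) {
        this.storage = storage;
    }

    @Override public String clusterId() { return "demo-cluster"; }
    @Override public boolean isLeader() { return true; }
    @Override public int nodeId() { return 0; }
    @Override public ObjectStorage objectStorage() { return storage; }
    @Override public List<Pair<String, String>> baseLabels() {
        return List.of(Pair.of("cluster", "demo-cluster"));
    }
    @Override public int intervalMs() { return 60_000; }
}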
@@ -1,47 +0,0 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.automq.opentelemetry.exporter;

import java.net.URI;
import java.util.List;
import java.util.Map;

/**
 * Service Provider Interface that allows extending the available metrics exporters
 * without modifying the core AutoMQ OpenTelemetry module.
 */
public interface MetricsExporterProvider {

    /**
     * @param scheme exporter scheme (e.g. "rw")
     * @return true if this provider can create an exporter for the supplied scheme
     */
    boolean supports(String scheme);

    /**
     * Creates a metrics exporter for the provided URI.
     *
     * @param config metrics configuration
     * @param uri original exporter URI
     * @param queryParameters parsed query parameters from the URI
     * @return a MetricsExporter instance, or {@code null} if unable to create one
     */
    MetricsExporter create(MetricsExportConfig config, URI uri, Map<String, List<String>> queryParameters);
}
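A sketch of what a provider plugged in through this SPI could look like. The "rw" scheme and the RemoteWriteMetricsExporter class are hypothetical; a real provider would also need a META-INF/services/com.automq.opentelemetry.exporter.MetricsExporterProvider entry so that ServiceLoader can discover it.

// Hypothetical provider; the "rw" scheme and RemoteWriteMetricsExporter are illustrative only.
public class RemoteWriteExporterProvider implements MetricsExporterProvider {
    @Override
    public boolean supports(String scheme) {
        return "rw".equalsIgnoreCase(scheme);
    }

    @Override
    public MetricsExporter create(MetricsExportConfig config, URI uri, Map<String, List<String>> queryParameters) {
        // Build the endpoint from the URI authority; query parameters could refine it further.
        String endpoint = "https://" + uri.getAuthority() + "/api/v1/write";
        return new RemoteWriteMetricsExporter(endpoint, config.intervalMs(), config.baseLabels());
    }
}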
@ -1,220 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.automq.opentelemetry.exporter;
|
||||
|
||||
import com.automq.opentelemetry.common.OTLPCompressionType;
|
||||
import com.automq.opentelemetry.common.OTLPProtocol;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.net.URI;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.ServiceLoader;
|
||||
|
||||
/**
|
||||
* Parses the exporter URI and creates the corresponding MetricsExporter instances.
|
||||
*/
|
||||
public class MetricsExporterURI {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(MetricsExporterURI.class);
|
||||
|
||||
private static final List<MetricsExporterProvider> PROVIDERS;
|
||||
|
||||
static {
|
||||
List<MetricsExporterProvider> providers = new ArrayList<>();
|
||||
ServiceLoader.load(MetricsExporterProvider.class).forEach(providers::add);
|
||||
PROVIDERS = Collections.unmodifiableList(providers);
|
||||
if (!PROVIDERS.isEmpty()) {
|
||||
LOGGER.info("Loaded {} telemetry exporter providers", PROVIDERS.size());
|
||||
}
|
||||
}
|
||||
|
||||
private final List<MetricsExporter> metricsExporters;
|
||||
|
||||
private MetricsExporterURI(List<MetricsExporter> metricsExporters) {
|
||||
this.metricsExporters = metricsExporters != null ? metricsExporters : new ArrayList<>();
|
||||
}
|
||||
|
||||
public List<MetricsExporter> getMetricsExporters() {
|
||||
return metricsExporters;
|
||||
}
|
||||
|
||||
public static MetricsExporterURI parse(String uriStr, MetricsExportConfig config) {
|
||||
LOGGER.info("Parsing metrics exporter URI: {}", uriStr);
|
||||
if (StringUtils.isBlank(uriStr)) {
|
||||
LOGGER.info("Metrics exporter URI is not configured, no metrics will be exported.");
|
||||
return new MetricsExporterURI(Collections.emptyList());
|
||||
}
|
||||
|
||||
// Support multiple exporters separated by comma
|
||||
String[] exporterUris = uriStr.split(",");
|
||||
if (exporterUris.length == 0) {
|
||||
return new MetricsExporterURI(Collections.emptyList());
|
||||
}
|
||||
|
||||
List<MetricsExporter> exporters = new ArrayList<>();
|
||||
for (String uri : exporterUris) {
|
||||
if (StringUtils.isBlank(uri)) {
|
||||
continue;
|
||||
}
|
||||
MetricsExporter exporter = parseExporter(config, uri.trim());
|
||||
if (exporter != null) {
|
||||
exporters.add(exporter);
|
||||
}
|
||||
}
|
||||
return new MetricsExporterURI(exporters);
|
||||
}
|
||||
|
||||
public static MetricsExporter parseExporter(MetricsExportConfig config, String uriStr) {
|
||||
try {
|
||||
URI uri = new URI(uriStr);
|
||||
String type = uri.getScheme();
|
||||
if (StringUtils.isBlank(type)) {
|
||||
LOGGER.error("Invalid metrics exporter URI: {}, exporter scheme is missing", uriStr);
|
||||
throw new IllegalArgumentException("Invalid metrics exporter URI: " + uriStr);
|
||||
}
|
||||
|
||||
Map<String, List<String>> queries = parseQueryParameters(uri);
|
||||
return parseExporter(config, type, queries, uri);
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Parse metrics exporter URI {} failed", uriStr, e);
|
||||
throw new IllegalArgumentException("Invalid metrics exporter URI: " + uriStr, e);
|
||||
}
|
||||
}
|
||||
|
||||
public static MetricsExporter parseExporter(MetricsExportConfig config, String type, Map<String, List<String>> queries, URI uri) {
|
||||
MetricsExporterType exporterType = MetricsExporterType.fromString(type);
|
||||
switch (exporterType) {
|
||||
case PROMETHEUS:
|
||||
return buildPrometheusExporter(config, queries, uri);
|
||||
case OTLP:
|
||||
return buildOtlpExporter(config, queries, uri);
|
||||
case OPS:
|
||||
return buildS3MetricsExporter(config, uri);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
MetricsExporterProvider provider = findProvider(type);
|
||||
if (provider != null) {
|
||||
MetricsExporter exporter = provider.create(config, uri, queries);
|
||||
if (exporter != null) {
|
||||
return exporter;
|
||||
}
|
||||
}
|
||||
|
||||
LOGGER.warn("Unsupported metrics exporter type: {}", type);
|
||||
return null;
|
||||
}
|
||||
|
||||
private static MetricsExporter buildPrometheusExporter(MetricsExportConfig config, Map<String, List<String>> queries, URI uri) {
|
||||
// Use query parameters if available, otherwise fall back to URI authority or config defaults
|
||||
String host = getStringFromQuery(queries, "host", uri.getHost());
|
||||
if (StringUtils.isBlank(host)) {
|
||||
host = "localhost";
|
||||
}
|
||||
|
||||
int port = uri.getPort();
|
||||
if (port <= 0) {
|
||||
String portStr = getStringFromQuery(queries, "port", null);
|
||||
if (StringUtils.isNotBlank(portStr)) {
|
||||
try {
|
||||
port = Integer.parseInt(portStr);
|
||||
} catch (NumberFormatException e) {
|
||||
LOGGER.warn("Invalid port in query parameters: {}, using default", portStr);
|
||||
port = 9090;
|
||||
}
|
||||
} else {
|
||||
port = 9090;
|
||||
}
|
||||
}
|
||||
|
||||
return new PrometheusMetricsExporter(host, port, config.baseLabels());
|
||||
}
|
||||
|
||||
private static MetricsExporter buildOtlpExporter(MetricsExportConfig config, Map<String, List<String>> queries, URI uri) {
|
||||
// Get endpoint from query parameters or construct from URI
|
||||
String endpoint = getStringFromQuery(queries, "endpoint", null);
|
||||
if (StringUtils.isBlank(endpoint)) {
|
||||
endpoint = uri.getScheme() + "://" + uri.getAuthority();
|
||||
}
|
||||
|
||||
// Get protocol from query parameters or config
|
||||
String protocol = getStringFromQuery(queries, "protocol", OTLPProtocol.GRPC.getProtocol());
|
||||
|
||||
// Get compression from query parameters or config
|
||||
String compression = getStringFromQuery(queries, "compression", OTLPCompressionType.NONE.getType());
|
||||
|
||||
return new OTLPMetricsExporter(config.intervalMs(), endpoint, protocol, compression);
|
||||
}
|
||||
|
||||
private static MetricsExporter buildS3MetricsExporter(MetricsExportConfig config, URI uri) {
|
||||
LOGGER.info("Creating S3 metrics exporter from URI: {}", uri);
|
||||
if (config.objectStorage() == null) {
|
||||
LOGGER.warn("No object storage configured, skip s3 metrics exporter creation.");
|
||||
return null;
|
||||
}
|
||||
// Create the S3MetricsExporterAdapter with appropriate configuration
|
||||
return new com.automq.opentelemetry.exporter.s3.S3MetricsExporterAdapter(config);
|
||||
}
|
||||
|
||||
private static Map<String, List<String>> parseQueryParameters(URI uri) {
|
||||
Map<String, List<String>> queries = new HashMap<>();
|
||||
String query = uri.getQuery();
|
||||
if (StringUtils.isNotBlank(query)) {
|
||||
String[] pairs = query.split("&");
|
||||
for (String pair : pairs) {
|
||||
String[] keyValue = pair.split("=", 2);
|
||||
if (keyValue.length == 2) {
|
||||
String key = keyValue[0];
|
||||
String value = keyValue[1];
|
||||
queries.computeIfAbsent(key, k -> new ArrayList<>()).add(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
return queries;
|
||||
}
|
||||
|
||||
private static String getStringFromQuery(Map<String, List<String>> queries, String key, String defaultValue) {
|
||||
List<String> values = queries.get(key);
|
||||
if (values != null && !values.isEmpty()) {
|
||||
return values.get(0);
|
||||
}
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
private static MetricsExporterProvider findProvider(String scheme) {
|
||||
for (MetricsExporterProvider provider : PROVIDERS) {
|
||||
try {
|
||||
if (provider.supports(scheme)) {
|
||||
return provider;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Telemetry exporter provider {} failed to evaluate support for scheme {}", provider.getClass().getName(), scheme, e);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
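Taken together, the parser above accepts a comma-separated list of exporter URIs, creates one exporter per entry, and falls back to the SPI providers for unknown schemes. A minimal usage sketch; the "prometheus" and "otlp" scheme names are assumptions about MetricsExporterType.fromString (not shown in this diff), while the "protocol" and "compression" query keys match the parsing code above.

// Hypothetical usage sketch; scheme names are assumptions, query keys match the parser above.
static List<MetricsExporter> buildExporters(MetricsExportConfig config) {
    String uris = "prometheus://localhost:9090,otlp://collector:4317?protocol=grpc&compression=gzip";
    return MetricsExporterURI.parse(uris, config).getMetricsExporters();
}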
|
||||
|
|
@ -1,86 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.automq.opentelemetry.exporter.s3;
|
||||
|
||||
import com.automq.stream.s3.ByteBufAlloc;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.zip.GZIPInputStream;
|
||||
import java.util.zip.GZIPOutputStream;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
|
||||
/**
|
||||
* Utility class for data compression and decompression.
|
||||
*/
|
||||
public class CompressionUtils {
|
||||
|
||||
/**
|
||||
* Compress a ByteBuf using GZIP.
|
||||
*
|
||||
* @param input The input ByteBuf to compress.
|
||||
* @return A new ByteBuf containing the compressed data.
|
||||
* @throws IOException If an I/O error occurs during compression.
|
||||
*/
|
||||
public static ByteBuf compress(ByteBuf input) throws IOException {
|
||||
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
|
||||
GZIPOutputStream gzipOutputStream = new GZIPOutputStream(byteArrayOutputStream);
|
||||
|
||||
byte[] buffer = new byte[input.readableBytes()];
|
||||
input.readBytes(buffer);
|
||||
gzipOutputStream.write(buffer);
|
||||
gzipOutputStream.close();
|
||||
|
||||
ByteBuf compressed = ByteBufAlloc.byteBuffer(byteArrayOutputStream.size());
|
||||
compressed.writeBytes(byteArrayOutputStream.toByteArray());
|
||||
return compressed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Decompress a GZIP-compressed ByteBuf.
|
||||
*
|
||||
* @param input The compressed ByteBuf to decompress.
|
||||
* @return A new ByteBuf containing the decompressed data.
|
||||
* @throws IOException If an I/O error occurs during decompression.
|
||||
*/
|
||||
public static ByteBuf decompress(ByteBuf input) throws IOException {
|
||||
byte[] compressedData = new byte[input.readableBytes()];
|
||||
input.readBytes(compressedData);
|
||||
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(compressedData);
|
||||
GZIPInputStream gzipInputStream = new GZIPInputStream(byteArrayInputStream);
|
||||
|
||||
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
|
||||
byte[] buffer = new byte[1024];
|
||||
int bytesRead;
|
||||
while ((bytesRead = gzipInputStream.read(buffer)) != -1) {
|
||||
byteArrayOutputStream.write(buffer, 0, bytesRead);
|
||||
}
|
||||
|
||||
gzipInputStream.close();
|
||||
byteArrayOutputStream.close();
|
||||
|
||||
byte[] uncompressedData = byteArrayOutputStream.toByteArray();
|
||||
ByteBuf output = ByteBufAlloc.byteBuffer(uncompressedData.length);
|
||||
output.writeBytes(uncompressedData);
|
||||
return output;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,276 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.automq.opentelemetry.exporter.s3;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
* Utility class for Prometheus metric and label naming.
|
||||
*/
|
||||
public class PrometheusUtils {
|
||||
private static final String TOTAL_SUFFIX = "_total";
|
||||
|
||||
/**
|
||||
* Get the Prometheus unit from the OpenTelemetry unit.
|
||||
*
|
||||
* @param unit The OpenTelemetry unit.
|
||||
* @return The Prometheus unit.
|
||||
*/
|
||||
public static String getPrometheusUnit(String unit) {
|
||||
if (unit.contains("{")) {
|
||||
return "";
|
||||
}
|
||||
switch (unit) {
|
||||
// Time
|
||||
case "d":
|
||||
return "days";
|
||||
case "h":
|
||||
return "hours";
|
||||
case "min":
|
||||
return "minutes";
|
||||
case "s":
|
||||
return "seconds";
|
||||
case "ms":
|
||||
return "milliseconds";
|
||||
case "us":
|
||||
return "microseconds";
|
||||
case "ns":
|
||||
return "nanoseconds";
|
||||
// Bytes
|
||||
case "By":
|
||||
return "bytes";
|
||||
case "KiBy":
|
||||
return "kibibytes";
|
||||
case "MiBy":
|
||||
return "mebibytes";
|
||||
case "GiBy":
|
||||
return "gibibytes";
|
||||
case "TiBy":
|
||||
return "tibibytes";
|
||||
case "KBy":
|
||||
return "kilobytes";
|
||||
case "MBy":
|
||||
return "megabytes";
|
||||
case "GBy":
|
||||
return "gigabytes";
|
||||
case "TBy":
|
||||
return "terabytes";
|
||||
// SI
|
||||
case "m":
|
||||
return "meters";
|
||||
case "V":
|
||||
return "volts";
|
||||
case "A":
|
||||
return "amperes";
|
||||
case "J":
|
||||
return "joules";
|
||||
case "W":
|
||||
return "watts";
|
||||
case "g":
|
||||
return "grams";
|
||||
// Misc
|
||||
case "Cel":
|
||||
return "celsius";
|
||||
case "Hz":
|
||||
return "hertz";
|
||||
case "1":
|
||||
return "";
|
||||
case "%":
|
||||
return "percent";
|
||||
// Rate units (per second)
|
||||
case "1/s":
|
||||
return "per_second";
|
||||
case "By/s":
|
||||
return "bytes_per_second";
|
||||
case "KiBy/s":
|
||||
return "kibibytes_per_second";
|
||||
case "MiBy/s":
|
||||
return "mebibytes_per_second";
|
||||
case "GiBy/s":
|
||||
return "gibibytes_per_second";
|
||||
case "KBy/s":
|
||||
return "kilobytes_per_second";
|
||||
case "MBy/s":
|
||||
return "megabytes_per_second";
|
||||
case "GBy/s":
|
||||
return "gigabytes_per_second";
|
||||
// Rate units (per minute)
|
||||
case "1/min":
|
||||
return "per_minute";
|
||||
case "By/min":
|
||||
return "bytes_per_minute";
|
||||
// Rate units (per hour)
|
||||
case "1/h":
|
||||
return "per_hour";
|
||||
case "By/h":
|
||||
return "bytes_per_hour";
|
||||
// Rate units (per day)
|
||||
case "1/d":
|
||||
return "per_day";
|
||||
case "By/d":
|
||||
return "bytes_per_day";
|
||||
default:
|
||||
return unit;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Map a metric name to a Prometheus-compatible name.
|
||||
*
|
||||
* @param name The original metric name.
|
||||
* @param unit The metric unit.
|
||||
* @param isCounter Whether the metric is a counter.
|
||||
* @param isGauge Whether the metric is a gauge.
|
||||
* @return The Prometheus-compatible metric name.
|
||||
*/
|
||||
public static String mapMetricsName(String name, String unit, boolean isCounter, boolean isGauge) {
|
||||
// Replace "." into "_"
|
||||
name = name.replaceAll("\\.", "_");
|
||||
|
||||
String prometheusUnit = getPrometheusUnit(unit);
|
||||
boolean shouldAppendUnit = StringUtils.isNotBlank(prometheusUnit) && !name.contains(prometheusUnit);
|
||||
|
||||
// append prometheus unit if not null or empty.
|
||||
// unit should be appended before type suffix
|
||||
if (shouldAppendUnit) {
|
||||
name = name + "_" + prometheusUnit;
|
||||
}
|
||||
|
||||
// trim counter's _total suffix so the unit is placed before it.
|
||||
if (isCounter && name.endsWith(TOTAL_SUFFIX)) {
|
||||
name = name.substring(0, name.length() - TOTAL_SUFFIX.length());
|
||||
}
|
||||
|
||||
// replace _total suffix, or add if it wasn't already present.
|
||||
if (isCounter) {
|
||||
name = name + TOTAL_SUFFIX;
|
||||
}
|
||||
|
||||
// special case - gauge with intelligent Connect metric handling
|
||||
if ("1".equals(unit) && isGauge && !name.contains("ratio")) {
|
||||
if (isConnectMetric(name)) {
|
||||
// For Connect metrics, use improved logic to avoid misleading _ratio suffix
|
||||
if (shouldAddRatioSuffixForConnect(name)) {
|
||||
name = name + "_ratio";
|
||||
}
|
||||
} else {
|
||||
// For other metrics, maintain original behavior
|
||||
name = name + "_ratio";
|
||||
}
|
||||
}
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Map a label name to a Prometheus-compatible name.
|
||||
*
|
||||
* @param name The original label name.
|
||||
* @return The Prometheus-compatible label name.
|
||||
*/
|
||||
public static String mapLabelName(String name) {
|
||||
if (StringUtils.isBlank(name)) {
|
||||
return "";
|
||||
}
|
||||
return name.replaceAll("\\.", "_");
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a metric name is related to Kafka Connect.
|
||||
*
|
||||
* @param name The metric name to check.
|
||||
* @return true if it's a Connect metric, false otherwise.
|
||||
*/
|
||||
private static boolean isConnectMetric(String name) {
|
||||
String lowerName = name.toLowerCase(Locale.ROOT);
|
||||
return lowerName.contains("kafka_connector_") ||
|
||||
lowerName.contains("kafka_task_") ||
|
||||
lowerName.contains("kafka_worker_") ||
|
||||
lowerName.contains("kafka_connect_") ||
|
||||
lowerName.contains("kafka_source_task_") ||
|
||||
lowerName.contains("kafka_sink_task_") ||
|
||||
lowerName.contains("connector_metrics") ||
|
||||
lowerName.contains("task_metrics") ||
|
||||
lowerName.contains("worker_metrics") ||
|
||||
lowerName.contains("source_task_metrics") ||
|
||||
lowerName.contains("sink_task_metrics");
|
||||
}
|
||||
|
||||
/**
|
||||
* Intelligently determine if a Connect metric should have a _ratio suffix.
|
||||
* This method avoids adding misleading _ratio suffixes to count-based metrics.
|
||||
*
|
||||
* @param name The metric name to check.
|
||||
* @return true if _ratio suffix should be added, false otherwise.
|
||||
*/
|
||||
private static boolean shouldAddRatioSuffixForConnect(String name) {
|
||||
String lowerName = name.toLowerCase(Locale.ROOT);
|
||||
|
||||
if (hasRatioRelatedWords(lowerName)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (isCountMetric(lowerName)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return isRatioMetric(lowerName);
|
||||
}
|
||||
|
||||
private static boolean hasRatioRelatedWords(String lowerName) {
|
||||
return lowerName.contains("ratio") || lowerName.contains("percent") ||
|
||||
lowerName.contains("rate") || lowerName.contains("fraction");
|
||||
}
|
||||
|
||||
private static boolean isCountMetric(String lowerName) {
|
||||
return hasBasicCountKeywords(lowerName) || hasConnectCountKeywords(lowerName) ||
|
||||
hasStatusCountKeywords(lowerName);
|
||||
}
|
||||
|
||||
private static boolean hasBasicCountKeywords(String lowerName) {
|
||||
return lowerName.contains("count") || lowerName.contains("num") ||
|
||||
lowerName.contains("size") || lowerName.contains("total") ||
|
||||
lowerName.contains("active") || lowerName.contains("current");
|
||||
}
|
||||
|
||||
private static boolean hasConnectCountKeywords(String lowerName) {
|
||||
return lowerName.contains("partition") || lowerName.contains("task") ||
|
||||
lowerName.contains("connector") || lowerName.contains("seq_no") ||
|
||||
lowerName.contains("seq_num") || lowerName.contains("attempts");
|
||||
}
|
||||
|
||||
private static boolean hasStatusCountKeywords(String lowerName) {
|
||||
return lowerName.contains("success") || lowerName.contains("failure") ||
|
||||
lowerName.contains("errors") || lowerName.contains("retries") ||
|
||||
lowerName.contains("skipped") || lowerName.contains("running") ||
|
||||
lowerName.contains("paused") || lowerName.contains("failed") ||
|
||||
lowerName.contains("destroyed");
|
||||
}
|
||||
|
||||
private static boolean isRatioMetric(String lowerName) {
|
||||
return lowerName.contains("utilization") ||
|
||||
lowerName.contains("usage") ||
|
||||
lowerName.contains("load") ||
|
||||
lowerName.contains("efficiency") ||
|
||||
lowerName.contains("hit_rate") ||
|
||||
lowerName.contains("miss_rate");
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,63 +0,0 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.automq.opentelemetry.exporter.s3;

import com.automq.opentelemetry.exporter.MetricsExportConfig;
import com.automq.opentelemetry.exporter.MetricsExporter;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;

import io.opentelemetry.sdk.metrics.export.MetricReader;
import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;

/**
 * An adapter class that implements the MetricsExporter interface and uses S3MetricsExporter
 * for actual metrics exporting functionality.
 */
public class S3MetricsExporterAdapter implements MetricsExporter {
    private static final Logger LOGGER = LoggerFactory.getLogger(S3MetricsExporterAdapter.class);

    private final MetricsExportConfig metricsExportConfig;

    /**
     * Creates a new S3MetricsExporterAdapter.
     *
     * @param metricsExportConfig The configuration for the S3 metrics exporter.
     */
    public S3MetricsExporterAdapter(MetricsExportConfig metricsExportConfig) {
        this.metricsExportConfig = metricsExportConfig;
        LOGGER.info("S3MetricsExporterAdapter initialized with labels :{}", metricsExportConfig.baseLabels());
    }

    @Override
    public MetricReader asMetricReader() {
        // Create and start the S3MetricsExporter
        S3MetricsExporter s3MetricsExporter = new S3MetricsExporter(metricsExportConfig);
        s3MetricsExporter.start();

        // Create and return the periodic metric reader
        return PeriodicMetricReader.builder(s3MetricsExporter)
            .setInterval(Duration.ofMillis(metricsExportConfig.intervalMs()))
            .build();
    }
}
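For orientation, the MetricReader returned above is consumed by the OpenTelemetry SDK. A sketch of typical wiring, assuming only the standard SdkMeterProvider API; this is not the project's actual bootstrap code, which is not part of this diff.

import io.opentelemetry.sdk.metrics.SdkMeterProvider;

// Sketch of typical OpenTelemetry SDK wiring; not the project's actual bootstrap code.
public final class S3MetricsBootstrap {
    public static SdkMeterProvider build(MetricsExportConfig config) {
        S3MetricsExporterAdapter adapter = new S3MetricsExporterAdapter(config);
        return SdkMeterProvider.builder()
            .registerMetricReader(adapter.asMetricReader())
            .build();
    }
}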
@@ -18,8 +18,7 @@ dependencies {
    compileOnly libs.awsSdkAuth
    implementation libs.reload4j
    implementation libs.nettyBuffer
    implementation project(':automq-metrics')
    implementation project(':automq-log-uploader')
    implementation libs.opentelemetrySdk
    implementation libs.jacksonDatabind
    implementation libs.jacksonYaml
    implementation libs.commonLang

@@ -66,4 +65,4 @@ jar {
    manifest {
        attributes 'Main-Class': 'com.automq.shell.AutoMQCLI'
    }
}
}
@@ -41,8 +41,9 @@ public class AutoMQApplication {
        if (override) {
            CONTAINER.put(type, singleton);
            return true;
        } else {
            return CONTAINER.putIfAbsent(type, singleton) == null;
        }
        return CONTAINER.putIfAbsent(type, singleton) == null;
    }

    public static <T> T getBean(Class<T> type) {
@@ -110,11 +110,9 @@ public class Deploy implements Callable<Integer> {
        String globalAccessKey = null;
        String globalSecretKey = null;
        for (Env env : topo.getGlobal().getEnvs()) {
            if ("KAFKA_S3_ACCESS_KEY".equals(env.getName()) ||
                "AWS_ACCESS_KEY_ID".equals(env.getName())) {
            if ("KAFKA_S3_ACCESS_KEY".equals(env.getName())) {
                globalAccessKey = env.getValue();
            } else if ("KAFKA_S3_SECRET_KEY".equals(env.getName()) ||
                "AWS_SECRET_ACCESS_KEY".equals(env.getName())) {
            } else if ("KAFKA_S3_SECRET_KEY".equals(env.getName())) {
                globalSecretKey = env.getValue();
            }
        }
@@ -17,7 +17,7 @@
 * limitations under the License.
 */

package com.automq.log.uploader;
package com.automq.shell.log;

import org.apache.commons.lang3.StringUtils;
@@ -17,9 +17,10 @@
 * limitations under the License.
 */

package com.automq.log.uploader;
package com.automq.shell.log;

import com.automq.log.uploader.util.Utils;
import com.automq.shell.AutoMQApplication;
import com.automq.shell.util.Utils;
import com.automq.stream.s3.operator.ObjectStorage;
import com.automq.stream.s3.operator.ObjectStorage.ObjectInfo;
import com.automq.stream.s3.operator.ObjectStorage.ObjectPath;
@@ -54,14 +55,12 @@ public class LogUploader implements LogRecorder {

    public static final int DEFAULT_MAX_QUEUE_SIZE = 64 * 1024;
    public static final int DEFAULT_BUFFER_SIZE = 16 * 1024 * 1024;
    public static final int UPLOAD_INTERVAL = System.getenv("AUTOMQ_OBSERVABILITY_UPLOAD_INTERVAL") != null
        ? Integer.parseInt(System.getenv("AUTOMQ_OBSERVABILITY_UPLOAD_INTERVAL"))
        : 60 * 1000;
    public static final int CLEANUP_INTERVAL = System.getenv("AUTOMQ_OBSERVABILITY_CLEANUP_INTERVAL") != null
        ? Integer.parseInt(System.getenv("AUTOMQ_OBSERVABILITY_CLEANUP_INTERVAL"))
        : 2 * 60 * 1000;
    public static final int UPLOAD_INTERVAL = System.getenv("AUTOMQ_OBSERVABILITY_UPLOAD_INTERVAL") != null ? Integer.parseInt(System.getenv("AUTOMQ_OBSERVABILITY_UPLOAD_INTERVAL")) : 60 * 1000;
    public static final int CLEANUP_INTERVAL = System.getenv("AUTOMQ_OBSERVABILITY_CLEANUP_INTERVAL") != null ? Integer.parseInt(System.getenv("AUTOMQ_OBSERVABILITY_CLEANUP_INTERVAL")) : 2 * 60 * 1000;
    public static final int MAX_JITTER_INTERVAL = 60 * 1000;

    private static final LogUploader INSTANCE = new LogUploader();

    private final BlockingQueue<LogEvent> queue = new LinkedBlockingQueue<>(DEFAULT_MAX_QUEUE_SIZE);
    private final ByteBuf uploadBuffer = Unpooled.directBuffer(DEFAULT_BUFFER_SIZE);
    private final Random random = new Random();
@@ -72,42 +71,16 @@ public class LogUploader implements LogRecorder {

    private volatile S3LogConfig config;

    private volatile CompletableFuture<Void> startFuture;
    private ObjectStorage objectStorage;
    private Thread uploadThread;
    private Thread cleanupThread;

    public LogUploader() {
    private LogUploader() {
    }

    public synchronized void start(S3LogConfig config) {
        if (this.config != null) {
            LOGGER.warn("LogUploader is already started.");
            return;
        }
        this.config = config;
        if (!config.isEnabled() || config.objectStorage() == null) {
            LOGGER.warn("LogUploader is disabled due to configuration.");
            closed = true;
            return;
        }

        try {
            this.objectStorage = config.objectStorage();
            this.uploadThread = new Thread(new UploadTask());
            this.uploadThread.setName("log-uploader-upload-thread");
            this.uploadThread.setDaemon(true);
            this.uploadThread.start();

            this.cleanupThread = new Thread(new CleanupTask());
            this.cleanupThread.setName("log-uploader-cleanup-thread");
            this.cleanupThread.setDaemon(true);
            this.cleanupThread.start();

            LOGGER.info("LogUploader started successfully.");
        } catch (Exception e) {
            LOGGER.error("Failed to start LogUploader", e);
            closed = true;
        }
    public static LogUploader getInstance() {
        return INSTANCE;
    }

    public void close() throws InterruptedException {
@@ -124,15 +97,63 @@ public class LogUploader implements LogRecorder {

    @Override
    public boolean append(LogEvent event) {
        if (!closed) {
        if (!closed && couldUpload()) {
            return queue.offer(event);
        }
        return false;
    }

    private boolean couldUpload() {
        initConfiguration();
        boolean enabled = config != null && config.isEnabled() && config.objectStorage() != null;

        if (enabled) {
            initUploadComponent();
        }

        return enabled && startFuture != null && startFuture.isDone();
    }

    private void initConfiguration() {
        if (config == null) {
            synchronized (this) {
                if (config == null) {
                    config = AutoMQApplication.getBean(S3LogConfig.class);
                }
            }
        }
    }

    private void initUploadComponent() {
        if (startFuture == null) {
            synchronized (this) {
                if (startFuture == null) {
                    startFuture = CompletableFuture.runAsync(() -> {
                        try {
                            objectStorage = config.objectStorage();
                            uploadThread = new Thread(new UploadTask());
                            uploadThread.setName("log-uploader-upload-thread");
                            uploadThread.setDaemon(true);
                            uploadThread.start();

                            cleanupThread = new Thread(new CleanupTask());
                            cleanupThread.setName("log-uploader-cleanup-thread");
                            cleanupThread.setDaemon(true);
                            cleanupThread.start();

                            startFuture.complete(null);
                        } catch (Exception e) {
                            LOGGER.error("Initialize log uploader failed", e);
                        }
                    }, command -> new Thread(command).start());
                }
            }
        }
    }

    private class UploadTask implements Runnable {

        private String formatTimestampInMillis(long timestamp) {
        public String formatTimestampInMillis(long timestamp) {
            return ZonedDateTime.ofInstant(Instant.ofEpochMilli(timestamp), ZoneId.systemDefault())
                .format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS Z"));
        }
@@ -144,6 +165,7 @@ public class LogUploader implements LogRecorder {
                    long now = System.currentTimeMillis();
                    LogEvent event = queue.poll(1, TimeUnit.SECONDS);
                    if (event != null) {
                        // DateTime Level [Logger] Message \n stackTrace
                        StringBuilder logLine = new StringBuilder()
                            .append(formatTimestampInMillis(event.timestampMillis()))
                            .append(" ")
@ -182,22 +204,25 @@ public class LogUploader implements LogRecorder {
|
|||
|
||||
private void upload(long now) {
|
||||
if (uploadBuffer.readableBytes() > 0) {
|
||||
try {
|
||||
while (!Thread.currentThread().isInterrupted()) {
|
||||
if (objectStorage == null) {
|
||||
break;
|
||||
}
|
||||
try {
|
||||
String objectKey = getObjectKey();
|
||||
objectStorage.write(WriteOptions.DEFAULT, objectKey, Utils.compress(uploadBuffer.slice().asReadOnly())).get();
|
||||
break;
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Failed to upload logs, will retry", e);
|
||||
Thread.sleep(1000);
|
||||
if (couldUpload()) {
|
||||
try {
|
||||
while (!Thread.currentThread().isInterrupted()) {
|
||||
if (objectStorage == null) {
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
String objectKey = getObjectKey();
|
||||
objectStorage.write(WriteOptions.DEFAULT, objectKey, Utils.compress(uploadBuffer.slice().asReadOnly())).get();
|
||||
break;
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace(System.err);
|
||||
Thread.sleep(1000);
|
||||
}
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
//ignore
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
uploadBuffer.clear();
|
||||
lastUploadTimestamp = now;
|
||||
|
|
@ -212,11 +237,12 @@ public class LogUploader implements LogRecorder {
|
|||
public void run() {
|
||||
while (!Thread.currentThread().isInterrupted()) {
|
||||
try {
|
||||
if (closed || !config.isLeader()) {
|
||||
if (closed || !config.isActiveController()) {
|
||||
Thread.sleep(Duration.ofMinutes(1).toMillis());
|
||||
continue;
|
||||
}
|
||||
long expiredTime = System.currentTimeMillis() - CLEANUP_INTERVAL;
|
||||
|
||||
List<ObjectInfo> objects = objectStorage.list(String.format("automq/logs/%s", config.clusterId())).join();
|
||||
|
||||
if (!objects.isEmpty()) {
|
||||
|
|
@ -226,6 +252,7 @@ public class LogUploader implements LogRecorder {
|
|||
.collect(Collectors.toList());
|
||||
|
||||
if (!keyList.isEmpty()) {
|
||||
// Some of s3 implements allow only 1000 keys per request.
|
||||
CompletableFuture<?>[] deleteFutures = Lists.partition(keyList, 1000)
|
||||
.stream()
|
||||
.map(objectStorage::delete)
|
||||
|
|
@ -233,6 +260,7 @@ public class LogUploader implements LogRecorder {
|
|||
CompletableFuture.allOf(deleteFutures).join();
|
||||
}
|
||||
}
|
||||
|
||||
Thread.sleep(Duration.ofMinutes(1).toMillis());
|
||||
} catch (InterruptedException e) {
|
||||
break;
|
||||
|
|
@ -247,4 +275,5 @@ public class LogUploader implements LogRecorder {
|
|||
String hour = LocalDateTime.now(ZoneOffset.UTC).format(DateTimeFormatter.ofPattern("yyyyMMddHH"));
|
||||
return String.format("automq/logs/%s/%s/%s/%s", config.clusterId(), config.nodeId(), hour, UUID.randomUUID());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@@ -17,18 +17,19 @@
 * limitations under the License.
 */

package com.automq.log.uploader;
package com.automq.shell.log;

import com.automq.stream.s3.operator.ObjectStorage;

public interface S3LogConfig {

    boolean isEnabled();

    boolean isActiveController();

    String clusterId();

    int nodeId();

    ObjectStorage objectStorage();

    boolean isLeader();
}
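A minimal implementation sketch of the post-change interface shape shown above (isEnabled, isActiveController, clusterId, nodeId, objectStorage); the class name and return values are assumptions for illustration, and an ObjectStorage instance is assumed to be supplied by the caller.

// Illustrative sketch; assumes the post-change interface shape shown above.
public class StaticS3LogConfig implements S3LogConfig {
    private final ObjectStorage storage;

    public StaticS3LogConfig(ObjectStorage storage) {
        this.storage = storage;
    }

    @Override public boolean isEnabled() { return true; }
    @Override public boolean isActiveController() { return true; }
    @Override public String clusterId() { return "demo-cluster"; }
    @Override public int nodeId() { return 0; }
    @Override public ObjectStorage objectStorage() { return storage; }
}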
@@ -0,0 +1,50 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.automq.shell.log;

import org.apache.log4j.RollingFileAppender;
import org.apache.log4j.spi.LoggingEvent;

public class S3RollingFileAppender extends RollingFileAppender {
    private final LogUploader logUploader = LogUploader.getInstance();

    @Override
    protected void subAppend(LoggingEvent event) {
        super.subAppend(event);
        if (!closed) {
            LogRecorder.LogEvent logEvent = new LogRecorder.LogEvent(
                event.getTimeStamp(),
                event.getLevel().toString(),
                event.getLoggerName(),
                event.getRenderedMessage(),
                event.getThrowableStrRep());

            try {
                logEvent.validate();
            } catch (IllegalArgumentException e) {
                // Drop invalid log event
                errorHandler.error("Failed to validate log event", e, 0);
                return;
            }

            logUploader.append(logEvent);
        }
    }
}
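The appender is normally declared in the log4j configuration shipped with the broker. As a rough illustration of the equivalent programmatic setup using the standard log4j 1.x / reload4j API, with placeholder path, size, and pattern values (this is not how the project itself wires it):

// Rough programmatic equivalent of a log4j configuration entry; values are placeholders.
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public final class S3AppenderSetup {
    public static void main(String[] args) {
        S3RollingFileAppender appender = new S3RollingFileAppender();
        appender.setFile("/tmp/automq/server.log");
        appender.setMaxFileSize("100MB");
        appender.setMaxBackupIndex(10);
        appender.setLayout(new PatternLayout("%d{ISO8601} %p [%c] %m%n"));
        appender.activateOptions();
        // Every message logged through the root logger is also queued for S3 upload.
        Logger.getRootLogger().addAppender(appender);
    }
}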
@ -0,0 +1,128 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.automq.shell.metrics;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
public class PrometheusUtils {
|
||||
private static final String TOTAL_SUFFIX = "_total";
|
||||
|
||||
public static String getPrometheusUnit(String unit) {
|
||||
if (unit.contains("{")) {
|
||||
return "";
|
||||
}
|
||||
switch (unit) {
|
||||
// Time
|
||||
case "d":
|
||||
return "days";
|
||||
case "h":
|
||||
return "hours";
|
||||
case "min":
|
||||
return "minutes";
|
||||
case "s":
|
||||
return "seconds";
|
||||
case "ms":
|
||||
return "milliseconds";
|
||||
case "us":
|
||||
return "microseconds";
|
||||
case "ns":
|
||||
return "nanoseconds";
|
||||
// Bytes
|
||||
case "By":
|
||||
return "bytes";
|
||||
case "KiBy":
|
||||
return "kibibytes";
|
||||
case "MiBy":
|
||||
return "mebibytes";
|
||||
case "GiBy":
|
||||
return "gibibytes";
|
||||
case "TiBy":
|
||||
return "tibibytes";
|
||||
case "KBy":
|
||||
return "kilobytes";
|
||||
case "MBy":
|
||||
return "megabytes";
|
||||
case "GBy":
|
||||
return "gigabytes";
|
||||
case "TBy":
|
||||
return "terabytes";
|
||||
// SI
|
||||
case "m":
|
||||
return "meters";
|
||||
case "V":
|
||||
return "volts";
|
||||
case "A":
|
||||
return "amperes";
|
||||
case "J":
|
||||
return "joules";
|
||||
case "W":
|
||||
return "watts";
|
||||
case "g":
|
||||
return "grams";
|
||||
// Misc
|
||||
case "Cel":
|
||||
return "celsius";
|
||||
case "Hz":
|
||||
return "hertz";
|
||||
case "1":
|
||||
return "";
|
||||
case "%":
|
||||
return "percent";
|
||||
default:
|
||||
return unit;
|
||||
}
|
||||
}
|
||||
|
||||
public static String mapMetricsName(String name, String unit, boolean isCounter, boolean isGauge) {
|
||||
// Replace "." into "_"
|
||||
name = name.replaceAll("\\.", "_");
|
||||
|
||||
String prometheusUnit = getPrometheusUnit(unit);
|
||||
boolean shouldAppendUnit = StringUtils.isNotBlank(prometheusUnit) && !name.contains(prometheusUnit);
|
||||
|
||||
// append prometheus unit if not null or empty.
|
||||
// unit should be appended before type suffix
|
||||
if (shouldAppendUnit) {
|
||||
name = name + "_" + prometheusUnit;
|
||||
}
|
||||
|
||||
// trim counter's _total suffix so the unit is placed before it.
|
||||
if (isCounter && name.endsWith(TOTAL_SUFFIX)) {
|
||||
name = name.substring(0, name.length() - TOTAL_SUFFIX.length());
|
||||
}
|
||||
|
||||
// replace _total suffix, or add if it wasn't already present.
|
||||
if (isCounter) {
|
||||
name = name + TOTAL_SUFFIX;
|
||||
}
|
||||
// special case - gauge
|
||||
if (unit.equals("1") && isGauge && !name.contains("ratio")) {
|
||||
name = name + "_ratio";
|
||||
}
|
||||
return name;
|
||||
}
|
||||
|
||||
public static String mapLabelName(String name) {
|
||||
if (StringUtils.isBlank(name)) {
|
||||
return "";
|
||||
}
|
||||
return name.replaceAll("\\.", "_");
|
||||
}
|
||||
}
|
||||
|
|
@ -17,15 +17,23 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package kafka.automq.table.process;
|
||||
package com.automq.shell.metrics;
|
||||
|
||||
import kafka.automq.table.process.exception.ConverterException;
|
||||
import com.automq.stream.s3.operator.ObjectStorage;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public interface Converter {
|
||||
public interface S3MetricsConfig {
|
||||
|
||||
ConversionResult convert(String topic, ByteBuffer buffer) throws ConverterException;
|
||||
String clusterId();
|
||||
|
||||
boolean isActiveController();
|
||||
|
||||
int nodeId();
|
||||
|
||||
ObjectStorage objectStorage();
|
||||
|
||||
List<Pair<String, String>> baseLabels();
|
||||
}
|
||||
|
|
@ -17,14 +17,13 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.automq.opentelemetry.exporter.s3;
|
||||
package com.automq.shell.metrics;
|
||||
|
||||
import com.automq.opentelemetry.exporter.MetricsExportConfig;
|
||||
import com.automq.shell.util.Utils;
|
||||
import com.automq.stream.s3.operator.ObjectStorage;
|
||||
import com.automq.stream.s3.operator.ObjectStorage.ObjectInfo;
|
||||
import com.automq.stream.s3.operator.ObjectStorage.ObjectPath;
|
||||
import com.automq.stream.s3.operator.ObjectStorage.WriteOptions;
|
||||
import com.automq.stream.utils.Threads;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.fasterxml.jackson.databind.node.ArrayNode;
|
||||
import com.fasterxml.jackson.databind.node.ObjectNode;
|
||||
|
|
@ -60,9 +59,6 @@ import io.opentelemetry.sdk.metrics.data.HistogramPointData;
|
|||
import io.opentelemetry.sdk.metrics.data.MetricData;
|
||||
import io.opentelemetry.sdk.metrics.export.MetricExporter;
|
||||
|
||||
/**
|
||||
* An S3 metrics exporter that uploads metrics data to S3 buckets.
|
||||
*/
|
||||
public class S3MetricsExporter implements MetricExporter {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(S3MetricsExporter.class);
|
||||
|
||||
|
|
@ -71,13 +67,13 @@ public class S3MetricsExporter implements MetricExporter {
|
|||
public static final int MAX_JITTER_INTERVAL = 60 * 1000;
|
||||
public static final int DEFAULT_BUFFER_SIZE = 16 * 1024 * 1024;
|
||||
|
||||
private final MetricsExportConfig config;
|
||||
private final S3MetricsConfig config;
|
||||
private final Map<String, String> defaultTagMap = new HashMap<>();
|
||||
|
||||
private final ByteBuf uploadBuffer = Unpooled.directBuffer(DEFAULT_BUFFER_SIZE);
|
||||
private static final Random RANDOM = new Random();
|
||||
private final Random random = new Random();
|
||||
private volatile long lastUploadTimestamp = System.currentTimeMillis();
|
||||
private volatile long nextUploadInterval = UPLOAD_INTERVAL + RANDOM.nextInt(MAX_JITTER_INTERVAL);
|
||||
private volatile long nextUploadInterval = UPLOAD_INTERVAL + random.nextInt(MAX_JITTER_INTERVAL);
|
||||
|
||||
private final ObjectStorage objectStorage;
|
||||
private final ObjectMapper objectMapper = new ObjectMapper();
|
||||
|
|
@ -86,12 +82,7 @@ public class S3MetricsExporter implements MetricExporter {
|
|||
private final Thread uploadThread;
|
||||
private final Thread cleanupThread;
|
||||
|
||||
/**
|
||||
* Creates a new S3MetricsExporter.
|
||||
*
|
||||
* @param config The configuration for the S3 metrics exporter.
|
||||
*/
|
||||
public S3MetricsExporter(MetricsExportConfig config) {
|
||||
public S3MetricsExporter(S3MetricsConfig config) {
|
||||
this.config = config;
|
||||
this.objectStorage = config.objectStorage();
|
||||
|
||||
|
|
@ -109,9 +100,6 @@ public class S3MetricsExporter implements MetricExporter {
|
|||
cleanupThread.setDaemon(true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Starts the exporter threads.
|
||||
*/
|
||||
public void start() {
|
||||
uploadThread.start();
|
||||
cleanupThread.start();
|
||||
|
|
@ -150,7 +138,7 @@ public class S3MetricsExporter implements MetricExporter {
|
|||
public void run() {
|
||||
while (!Thread.currentThread().isInterrupted()) {
|
||||
try {
|
||||
if (closed || !config.isLeader()) {
|
||||
if (closed || !config.isActiveController()) {
|
||||
Thread.sleep(Duration.ofMinutes(1).toMillis());
|
||||
continue;
|
||||
}
|
||||
|
|
@ -173,7 +161,8 @@ public class S3MetricsExporter implements MetricExporter {
|
|||
CompletableFuture.allOf(deleteFutures).join();
|
||||
}
|
||||
}
|
||||
Threads.sleep(Duration.ofMinutes(1).toMillis());
|
||||
|
||||
Thread.sleep(Duration.ofMinutes(1).toMillis());
|
||||
} catch (InterruptedException e) {
|
||||
break;
|
||||
} catch (Exception e) {
|
||||
|
|
@ -262,13 +251,13 @@ public class S3MetricsExporter implements MetricExporter {
|
|||
synchronized (uploadBuffer) {
|
||||
if (uploadBuffer.readableBytes() > 0) {
|
||||
try {
|
||||
objectStorage.write(WriteOptions.DEFAULT, getObjectKey(), CompressionUtils.compress(uploadBuffer.slice().asReadOnly())).get();
|
||||
objectStorage.write(WriteOptions.DEFAULT, getObjectKey(), Utils.compress(uploadBuffer.slice().asReadOnly())).get();
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to upload metrics to s3", e);
|
||||
return CompletableResultCode.ofFailure();
|
||||
} finally {
|
||||
lastUploadTimestamp = System.currentTimeMillis();
|
||||
nextUploadInterval = UPLOAD_INTERVAL + RANDOM.nextInt(MAX_JITTER_INTERVAL);
|
||||
nextUploadInterval = UPLOAD_INTERVAL + random.nextInt(MAX_JITTER_INTERVAL);
|
||||
uploadBuffer.clear();
|
||||
}
|
||||
}
|
||||
|
|
@@ -37,6 +37,7 @@ import org.apache.kafka.common.requests.s3.GetKVsRequest;
import org.apache.kafka.common.requests.s3.PutKVsRequest;
import org.apache.kafka.common.utils.Time;

import com.automq.shell.metrics.S3MetricsExporter;
import com.automq.stream.api.KeyValue;

import org.slf4j.Logger;

@@ -47,7 +48,7 @@ import java.util.List;
import java.util.Objects;

public class ClientKVClient {
    private static final Logger LOGGER = LoggerFactory.getLogger(ClientKVClient.class);
    private static final Logger LOGGER = LoggerFactory.getLogger(S3MetricsExporter.class);

    private final NetworkClient networkClient;
    private final Node bootstrapServer;
@@ -9,12 +9,10 @@ global:
  config: |
    s3.data.buckets=0@s3://xxx_bucket?region=us-east-1
    s3.ops.buckets=1@s3://xxx_bucket?region=us-east-1
    s3.wal.path=0@s3://xxx_bucket?region=us-east-1
    log.dirs=/root/kraft-logs
  envs:
    - name: AWS_ACCESS_KEY_ID
    - name: KAFKA_S3_ACCESS_KEY
      value: 'xxxxx'
    - name: AWS_SECRET_ACCESS_KEY
    - name: KAFKA_S3_SECRET_KEY
      value: 'xxxxx'
controllers:
  # The controllers default are combined nodes which roles are controller and broker.
@@ -23,10 +23,4 @@ fi
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx1024M"
fi
# Add additional help info for the new parameter (this won't be displayed directly but documents the change)
# --consumers-during-catchup: Percentage of consumers to activate during catch-up read (0-100, default: 100)
# This allows controlling what percentage of consumer groups are activated during catch-up
# reading to better simulate real-world scenarios where only a subset of consumers
# experience catch-up reads at the same time.

exec "$(dirname "$0")/kafka-run-class.sh" -name kafkaClient -loggc org.apache.kafka.tools.automq.PerfCommand "$@"
@@ -42,5 +42,4 @@ case $COMMAND in
    ;;
esac

export KAFKA_CONNECT_MODE=true
exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectDistributed "$@"

@@ -42,5 +42,4 @@ case $COMMAND in
    ;;
esac

export KAFKA_CONNECT_MODE=true
exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectStandalone "$@"
@@ -40,23 +40,7 @@ should_include_file() {
  fi
  file=$1
  if [ -z "$(echo "$file" | grep -E "$regex")" ] ; then
    # If Connect mode is enabled, apply additional filtering
    if [ "$KAFKA_CONNECT_MODE" = "true" ]; then
      # Skip if file doesn't exist
      [ ! -f "$file" ] && return 1

      # Exclude heavy dependencies that Connect doesn't need
      case "$file" in
        *hadoop*) return 1 ;;
        *hive*) return 1 ;;
        *iceberg*) return 1 ;;
        *avro*) return 1 ;;
        *parquet*) return 1 ;;
        *) return 0 ;;
      esac
    else
      return 0
    fi
    return 0
  else
    return 1
  fi

build.gradle
@ -53,7 +53,7 @@ plugins {
|
|||
|
||||
ext {
|
||||
gradleVersion = versions.gradle
|
||||
minJavaVersion = 17
|
||||
minJavaVersion = 11
|
||||
buildVersionFileName = "kafka-version.properties"
|
||||
|
||||
defaultMaxHeapSize = "2g"
|
||||
|
|
@ -150,10 +150,6 @@ allprojects {
|
|||
}
|
||||
|
||||
configurations.all {
|
||||
// Globally exclude commons-logging and logback to ensure a single logging implementation (reload4j)
|
||||
exclude group: "commons-logging", module: "commons-logging"
|
||||
exclude group: "ch.qos.logback", module: "logback-classic"
|
||||
exclude group: "ch.qos.logback", module: "logback-core"
|
||||
// zinc is the Scala incremental compiler, it has a configuration for its own dependencies
|
||||
// that are unrelated to the project dependencies, we should not change them
|
||||
if (name != "zinc") {
|
||||
|
|
@ -264,10 +260,7 @@ subprojects {
|
|||
options.compilerArgs << "-Xlint:-rawtypes"
|
||||
options.compilerArgs << "-Xlint:-serial"
|
||||
options.compilerArgs << "-Xlint:-try"
|
||||
// AutoMQ inject start
|
||||
// TODO: remove me, when upgrade to 4.x
|
||||
// options.compilerArgs << "-Werror"
|
||||
// AutoMQ inject start
|
||||
options.compilerArgs << "-Werror"
|
||||
|
||||
// --release is the recommended way to select the target release, but it's only supported in Java 9 so we also
|
||||
// set --source and --target via `sourceCompatibility` and `targetCompatibility` a couple of lines below
|
||||
|
|
@ -838,13 +831,6 @@ tasks.create(name: "jarConnect", dependsOn: connectPkgs.collect { it + ":jar" })
|
|||
|
||||
tasks.create(name: "testConnect", dependsOn: connectPkgs.collect { it + ":test" }) {}
|
||||
|
||||
// OpenTelemetry related tasks
|
||||
tasks.create(name: "jarOpenTelemetry", dependsOn: ":opentelemetry:jar") {}
|
||||
|
||||
tasks.create(name: "testOpenTelemetry", dependsOn: ":opentelemetry:test") {}
|
||||
|
||||
tasks.create(name: "buildOpenTelemetry", dependsOn: [":opentelemetry:jar", ":opentelemetry:test"]) {}
|
||||
|
||||
project(':server') {
|
||||
base {
|
||||
archivesName = "kafka-server"
|
||||
|
|
@ -945,8 +931,6 @@ project(':core') {
|
|||
implementation project(':storage')
|
||||
implementation project(':server')
|
||||
implementation project(':automq-shell')
|
||||
implementation project(':automq-metrics')
|
||||
implementation project(':automq-log-uploader')
|
||||
|
||||
implementation libs.argparse4j
|
||||
implementation libs.commonsValidator
|
||||
|
|
@ -984,77 +968,17 @@ project(':core') {
|
|||
implementation libs.guava
|
||||
implementation libs.slf4jBridge
|
||||
implementation libs.slf4jReload4j
|
||||
// The `jcl-over-slf4j` library is used to redirect JCL logging to SLF4J.
|
||||
implementation libs.jclOverSlf4j
|
||||
|
||||
implementation libs.opentelemetryJava8
|
||||
implementation libs.opentelemetryOshi
|
||||
implementation libs.opentelemetrySdk
|
||||
implementation libs.opentelemetrySdkMetrics
|
||||
implementation libs.opentelemetryExporterLogging
|
||||
implementation libs.opentelemetryExporterProm
|
||||
implementation libs.opentelemetryExporterOTLP
|
||||
implementation libs.opentelemetryJmx
|
||||
implementation libs.awsSdkAuth
|
||||
|
||||
// table topic start
|
||||
implementation ("org.apache.avro:avro:${versions.avro}")
|
||||
implementation ("org.apache.avro:avro-protobuf:${versions.avro}")
|
||||
implementation('com.google.protobuf:protobuf-java:3.25.5')
|
||||
implementation ("org.apache.iceberg:iceberg-core:${versions.iceberg}")
|
||||
implementation ("org.apache.iceberg:iceberg-api:${versions.iceberg}")
|
||||
implementation ("org.apache.iceberg:iceberg-data:${versions.iceberg}")
|
||||
implementation ("org.apache.iceberg:iceberg-parquet:${versions.iceberg}")
|
||||
implementation ("org.apache.iceberg:iceberg-common:${versions.iceberg}")
|
||||
implementation ("org.apache.iceberg:iceberg-aws:${versions.iceberg}")
|
||||
implementation ("org.apache.iceberg:iceberg-nessie:${versions.iceberg}")
|
||||
implementation ("software.amazon.awssdk:glue:${versions.awsSdk}")
|
||||
implementation ("software.amazon.awssdk:s3tables:${versions.awsSdk}")
|
||||
implementation 'software.amazon.s3tables:s3-tables-catalog-for-iceberg:0.1.0'
|
||||
|
||||
implementation ('org.apache.hadoop:hadoop-common:3.4.1') {
|
||||
exclude group: 'org.eclipse.jetty', module: '*'
|
||||
exclude group: 'com.sun.jersey', module: '*'
|
||||
}
|
||||
// for hadoop common
|
||||
implementation ("org.eclipse.jetty:jetty-webapp:${versions.jetty}")
|
||||
|
||||
implementation (libs.kafkaAvroSerializer) {
|
||||
exclude group: 'org.apache.kafka', module: 'kafka-clients'
|
||||
}
|
||||
|
||||
// > hive ext start
|
||||
implementation 'org.apache.iceberg:iceberg-hive-metastore:1.6.1'
|
||||
implementation('org.apache.hive:hive-metastore:3.1.3') {
|
||||
// Remove useless dependencies (copy from iceberg-kafka-connect)
|
||||
exclude group: "org.apache.avro", module: "avro"
|
||||
exclude group: "org.slf4j", module: "slf4j-log4j12"
|
||||
exclude group: "org.pentaho" // missing dependency
|
||||
exclude group: "org.apache.hbase"
|
||||
exclude group: "org.apache.logging.log4j"
|
||||
exclude group: "co.cask.tephra"
|
||||
exclude group: "com.google.code.findbugs", module: "jsr305"
|
||||
exclude group: "org.eclipse.jetty.aggregate", module: "jetty-all"
|
||||
exclude group: "org.eclipse.jetty.orbit", module: "javax.servlet"
|
||||
exclude group: "org.apache.parquet", module: "parquet-hadoop-bundle"
|
||||
exclude group: "com.tdunning", module: "json"
|
||||
exclude group: "javax.transaction", module: "transaction-api"
|
||||
exclude group: "com.zaxxer", module: "HikariCP"
|
||||
exclude group: "org.apache.hadoop", module: "hadoop-yarn-server-common"
|
||||
exclude group: "org.apache.hadoop", module: "hadoop-yarn-server-applicationhistoryservice"
|
||||
exclude group: "org.apache.hadoop", module: "hadoop-yarn-server-resourcemanager"
|
||||
exclude group: "org.apache.hadoop", module: "hadoop-yarn-server-web-proxy"
|
||||
exclude group: "org.apache.hive", module: "hive-service-rpc"
|
||||
exclude group: "com.github.joshelser", module: "dropwizard-metrics-hadoop-metrics2-reporter"
|
||||
}
|
||||
implementation ('org.apache.hadoop:hadoop-mapreduce-client-core:3.4.1') {
|
||||
exclude group: 'com.sun.jersey', module: '*'
|
||||
exclude group: 'com.sun.jersey.contribs', module: '*'
|
||||
exclude group: 'com.github.pjfanning', module: 'jersey-json'
|
||||
}
|
||||
// > hive ext end
|
||||
|
||||
// > Protobuf ext start
|
||||
// Wire Runtime for schema handling
|
||||
implementation ("com.squareup.wire:wire-schema:${versions.wire}")
|
||||
implementation ("com.squareup.wire:wire-runtime:${versions.wire}")
|
||||
implementation 'com.google.api.grpc:proto-google-common-protos:2.52.0'
|
||||
// > Protobuf ext end
|
||||
|
||||
// table topic end
|
||||
|
||||
implementation(libs.oshi) {
|
||||
exclude group: 'org.slf4j', module: '*'
|
||||
}
|
||||
|
|
@ -1069,7 +993,6 @@ project(':core') {
|
|||
testImplementation project(':storage:storage-api').sourceSets.test.output
|
||||
testImplementation project(':server').sourceSets.test.output
|
||||
testImplementation libs.bcpkix
|
||||
testImplementation libs.mockitoJunitJupiter // supports MockitoExtension
|
||||
testImplementation libs.mockitoCore
|
||||
testImplementation libs.guava
|
||||
testImplementation(libs.apacheda) {
|
||||
|
|
@ -1250,10 +1173,6 @@ project(':core') {
|
|||
from(project(':trogdor').configurations.runtimeClasspath) { into("libs/") }
|
||||
from(project(':automq-shell').jar) { into("libs/") }
|
||||
from(project(':automq-shell').configurations.runtimeClasspath) { into("libs/") }
|
||||
from(project(':automq-metrics').jar) { into("libs/") }
|
||||
from(project(':automq-metrics').configurations.runtimeClasspath) { into("libs/") }
|
||||
from(project(':automq-log-uploader').jar) { into("libs/") }
|
||||
from(project(':automq-log-uploader').configurations.runtimeClasspath) { into("libs/") }
|
||||
from(project(':shell').jar) { into("libs/") }
|
||||
from(project(':shell').configurations.runtimeClasspath) { into("libs/") }
|
||||
from(project(':connect:api').jar) { into("libs/") }
|
||||
|
|
@ -1285,38 +1204,6 @@ project(':core') {
|
|||
duplicatesStrategy 'exclude'
|
||||
}
|
||||
|
||||
// AutoMQ inject start
|
||||
tasks.create(name: "releaseE2ETar", dependsOn: [configurations.archives.artifacts, 'copyDependantTestLibs'], type: Tar) {
|
||||
def prefix = project.findProperty('prefix') ?: ''
|
||||
archiveBaseName = "${prefix}kafka"
|
||||
|
||||
into "${prefix}kafka-${archiveVersion.get()}"
|
||||
compression = Compression.GZIP
|
||||
from(project.file("$rootDir/bin")) { into "bin/" }
|
||||
from(project.file("$rootDir/config")) { into "config/" }
|
||||
from(project.file("$rootDir/licenses")) { into "licenses/" }
|
||||
from(project.file("$rootDir/docker/docker-compose.yaml")) { into "docker/" }
|
||||
from(project.file("$rootDir/docker/telemetry")) { into "docker/telemetry/" }
|
||||
from(project.file("$rootDir/LICENSE")) { into "" }
|
||||
from "$rootDir/NOTICE-binary" rename {String filename -> filename.replace("-binary", "")}
|
||||
from(configurations.runtimeClasspath) { into("libs/") }
|
||||
from(configurations.archives.artifacts.files) { into("libs/") }
|
||||
from(project.siteDocsTar) { into("site-docs/") }
|
||||
|
||||
// Include main and test jars from all subprojects
|
||||
rootProject.subprojects.each { subproject ->
|
||||
if (subproject.tasks.findByName('jar')) {
|
||||
from(subproject.tasks.named('jar')) { into('libs/') }
|
||||
}
|
||||
if (subproject.tasks.findByName('testJar')) {
|
||||
from(subproject.tasks.named('testJar')) { into('libs/') }
|
||||
}
|
||||
from(subproject.configurations.runtimeClasspath) { into('libs/') }
|
||||
}
|
||||
duplicatesStrategy 'exclude'
|
||||
}
|
||||
// AutoMQ inject end
|
||||
|
||||
jar {
|
||||
dependsOn('copyDependantLibs')
|
||||
}
|
||||
|
|
@ -1368,7 +1255,6 @@ project(':core') {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
project(':metadata') {
|
||||
base {
|
||||
archivesName = "kafka-metadata"
|
||||
|
|
@ -1391,7 +1277,6 @@ project(':metadata') {
|
|||
implementation libs.guava
|
||||
implementation libs.awsSdkAuth
|
||||
implementation project(':s3stream')
|
||||
implementation ("org.apache.avro:avro:${versions.avro}")
|
||||
|
||||
implementation libs.jacksonDatabind
|
||||
implementation libs.jacksonJDK8Datatypes
|
||||
|
|
@ -1962,12 +1847,7 @@ project(':server-common') {
|
|||
implementation libs.jacksonDatabind
|
||||
implementation libs.pcollections
|
||||
implementation libs.opentelemetrySdk
|
||||
|
||||
// AutoMQ inject start
|
||||
implementation project(':s3stream')
|
||||
implementation libs.commonLang
|
||||
// AutoMQ inject end
|
||||
|
||||
|
||||
testImplementation project(':clients')
|
||||
testImplementation project(':clients').sourceSets.test.output
|
||||
|
|
@ -2264,10 +2144,10 @@ project(':s3stream') {
|
|||
implementation 'software.amazon.awssdk.crt:aws-crt:0.30.8'
|
||||
implementation 'com.ibm.async:asyncutil:0.1.0'
|
||||
|
||||
testImplementation 'org.slf4j:slf4j-simple:1.7.36'
|
||||
testImplementation libs.junitJupiter
|
||||
testImplementation libs.mockitoCore
|
||||
testImplementation libs.mockitoJunitJupiter // supports MockitoExtension
|
||||
testImplementation 'org.slf4j:slf4j-simple:2.0.9'
|
||||
testImplementation 'org.junit.jupiter:junit-jupiter:5.10.0'
|
||||
testImplementation 'org.mockito:mockito-core:5.5.0'
|
||||
testImplementation 'org.mockito:mockito-junit-jupiter:5.5.0'
|
||||
testImplementation 'org.awaitility:awaitility:4.2.1'
|
||||
}
|
||||
|
||||
|
|
@ -2334,107 +2214,6 @@ project(':tools:tools-api') {
|
|||
}
|
||||
}
|
||||
|
||||
project(':automq-metrics') {
|
||||
archivesBaseName = "automq-metrics"
|
||||
|
||||
checkstyle {
|
||||
configProperties = checkstyleConfigProperties("import-control-server.xml")
|
||||
}
|
||||
|
||||
configurations {
|
||||
all {
|
||||
exclude group: 'io.opentelemetry', module: 'opentelemetry-exporter-sender-okhttp'
|
||||
}
|
||||
}
|
||||
|
||||
dependencies {
|
||||
// OpenTelemetry core dependencies
|
||||
api libs.opentelemetryJava8
|
||||
api libs.opentelemetryOshi
|
||||
api libs.opentelemetrySdk
|
||||
api libs.opentelemetrySdkMetrics
|
||||
api libs.opentelemetryExporterLogging
|
||||
api libs.opentelemetryExporterProm
|
||||
api libs.opentelemetryExporterOTLP
|
||||
api libs.opentelemetryExporterSenderJdk
|
||||
api libs.opentelemetryJmx
|
||||
|
||||
// Logging dependencies
|
||||
api libs.slf4jApi
|
||||
api libs.slf4jBridge // Add the SLF4J bridge dependency
|
||||
api libs.reload4j
|
||||
|
||||
api libs.commonLang
|
||||
|
||||
// Yammer metrics (for integration)
|
||||
api 'com.yammer.metrics:metrics-core:2.2.0'
|
||||
|
||||
implementation(project(':s3stream')) {
|
||||
exclude(group: 'io.opentelemetry', module: '*')
|
||||
exclude(group: 'io.opentelemetry.instrumentation', module: '*')
|
||||
exclude(group: 'io.opentelemetry.proto', module: '*')
|
||||
exclude(group: 'io.netty', module: 'netty-tcnative-boringssl-static')
|
||||
exclude(group: 'com.github.jnr', module: '*')
|
||||
exclude(group: 'org.aspectj', module: '*')
|
||||
exclude(group: 'net.java.dev.jna', module: '*')
|
||||
exclude(group: 'net.sourceforge.argparse4j', module: '*')
|
||||
exclude(group: 'com.bucket4j', module: '*')
|
||||
exclude(group: 'com.yammer.metrics', module: '*')
|
||||
exclude(group: 'com.github.spotbugs', module: '*')
|
||||
exclude(group: 'org.apache.kafka.shaded', module: '*')
|
||||
}
|
||||
implementation libs.nettyBuffer
|
||||
implementation libs.jacksonDatabind
|
||||
implementation libs.guava
|
||||
implementation project(':clients')
|
||||
|
||||
// Test dependencies
|
||||
testImplementation libs.junitJupiter
|
||||
testImplementation libs.mockitoCore
|
||||
testImplementation libs.slf4jReload4j
|
||||
|
||||
testRuntimeOnly libs.junitPlatformLanucher
|
||||
|
||||
implementation('io.opentelemetry:opentelemetry-sdk:1.40.0')
|
||||
implementation("io.opentelemetry.semconv:opentelemetry-semconv:1.25.0-alpha")
|
||||
implementation("io.opentelemetry.instrumentation:opentelemetry-runtime-telemetry-java8:2.6.0-alpha")
|
||||
implementation('com.google.protobuf:protobuf-java:3.25.5')
|
||||
implementation('org.xerial.snappy:snappy-java:1.1.10.5')
|
||||
}
|
||||
|
||||
clean.doFirst {
|
||||
delete "$buildDir/kafka/"
|
||||
}
|
||||
|
||||
javadoc {
|
||||
enabled = false
|
||||
}
|
||||
}
|
||||
|
||||
project(':automq-log-uploader') {
|
||||
archivesBaseName = "automq-log-uploader"
|
||||
|
||||
checkstyle {
|
||||
configProperties = checkstyleConfigProperties("import-control-server.xml")
|
||||
}
|
||||
|
||||
dependencies {
|
||||
api project(':s3stream')
|
||||
|
||||
implementation project(':clients')
|
||||
implementation libs.reload4j
|
||||
implementation libs.slf4jApi
|
||||
implementation libs.slf4jBridge
|
||||
implementation libs.nettyBuffer
|
||||
implementation libs.guava
|
||||
implementation libs.commonLang
|
||||
}
|
||||
|
||||
javadoc {
|
||||
enabled = false
|
||||
}
|
||||
}
|
||||
|
||||
project(':tools') {
|
||||
base {
|
||||
archivesName = "kafka-tools"
|
||||
|
|
@ -2456,9 +2235,6 @@ project(':tools') {
|
|||
exclude group: 'org.apache.kafka', module: 'kafka-clients'
|
||||
}
|
||||
implementation libs.bucket4j
|
||||
implementation (libs.oshi){
|
||||
exclude group: 'org.slf4j', module: '*'
|
||||
}
|
||||
// AutoMQ inject end
|
||||
|
||||
implementation project(':storage')
|
||||
|
|
@ -3542,8 +3318,6 @@ project(':connect:runtime') {
|
|||
api project(':clients')
|
||||
api project(':connect:json')
|
||||
api project(':connect:transforms')
|
||||
api project(':automq-metrics')
|
||||
api project(':automq-log-uploader')
|
||||
|
||||
implementation libs.slf4jApi
|
||||
implementation libs.reload4j
|
||||
|
|
@ -3552,7 +3326,6 @@ project(':connect:runtime') {
|
|||
implementation libs.jacksonJaxrsJsonProvider
|
||||
implementation libs.jerseyContainerServlet
|
||||
implementation libs.jerseyHk2
|
||||
implementation libs.jaxrsApi
|
||||
implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9
|
||||
implementation libs.activation // Jersey dependency that was available in the JDK before Java 9
|
||||
implementation libs.jettyServer
|
||||
|
|
|
|||
|
|
@ -1,62 +0,0 @@
# AutoMQ

[AutoMQ](https://www.automq.com/) is a cloud-native alternative to Kafka that decouples durability to cloud storage services such as S3: 10x more cost-effective, no cross-AZ traffic cost, autoscaling in seconds, and single-digit millisecond latency.
This Helm chart simplifies the deployment of AutoMQ into your Kubernetes cluster using the Software model.

## Prerequisites
### Install Helm
Install Helm v3.8.0 or later.
[Helm quickstart](https://helm.sh/zh/docs/intro/quickstart/)
```shell
helm version
```
### Using the Bitnami Helm repository
AutoMQ is fully compatible with Bitnami's Helm charts, so you can customize your AutoMQ Kubernetes cluster based on the corresponding Bitnami values.yaml.
[Bitnami Helm Charts](https://github.com/bitnami/charts)

## Quickstart
### Set up a Kubernetes Cluster
The quickest way to set up a Kubernetes cluster for installing Bitnami charts is to follow the "Bitnami Get Started" guides for the different services:

[Get Started with Bitnami Charts using the Amazon Elastic Container Service for Kubernetes (EKS)](https://docs.bitnami.com/kubernetes/get-started-eks/)


### Installing AutoMQ with the Bitnami Chart

As an alternative to supplying the configuration parameters as arguments, you can create a supplemental YAML file containing your specific config parameters. Any parameters not specified in this file default to the values in [values.yaml](values.yaml).

1. Create an empty `automq-values.yaml` file.
2. Edit the file with your specific parameters:

   You can refer to the [demo-values.yaml](/chart/bitnami/demo-values.yaml) we provide, which is based on the Bitnami [values.yaml](https://github.com/bitnami/charts/blob/main/bitnami/kafka/values.yaml) and deploys AutoMQ on AWS across 3 Availability Zones using m7g.xlarge instances (4 vCPUs, 16 GB memory, 156 MiB/s network bandwidth).

   You need to replace the bucket configuration placeholders (`${}`), such as ops-bucket, data-bucket, region, endpoint, and access-key/secret-key.

3. Install or upgrade the AutoMQ Helm chart using your custom YAML file:

   We recommend pinning the Bitnami Helm chart to `--version` [31.x.x (31.1.0 ~ 31.5.0)](https://artifacthub.io/packages/helm/bitnami/kafka) when installing AutoMQ.

```shell
helm install automq-release oci://registry-1.docker.io/bitnamicharts/kafka -f demo-values.yaml --version 31.5.0 --namespace automq --create-namespace
```

### Upgrading

To upgrade the deployment:

```shell
helm repo update
helm upgrade automq-release oci://registry-1.docker.io/bitnamicharts/kafka -f demo-values.yaml --version 31.5.0 --namespace automq --create-namespace
```

### Uninstalling the Chart

To uninstall/delete the deployment:

```shell
helm uninstall automq-release --namespace automq
```

This command removes all the Kubernetes components associated with the chart and deletes the release.
@ -1,141 +0,0 @@
|
|||
global:
|
||||
security:
|
||||
allowInsecureImages: true
|
||||
image:
|
||||
registry: automqinc
|
||||
repository: automq
|
||||
tag: 1.5.0-bitnami
|
||||
pullPolicy: Always
|
||||
extraEnvVars:
|
||||
- name: AWS_ACCESS_KEY_ID
|
||||
value: "${access-key}"
|
||||
- name: AWS_SECRET_ACCESS_KEY
|
||||
value: "${secret-key}"
|
||||
controller:
|
||||
replicaCount: 3
|
||||
resources:
|
||||
requests:
|
||||
cpu: "3000m"
|
||||
memory: "12Gi"
|
||||
limits:
|
||||
cpu: "4000m"
|
||||
memory: "16Gi"
|
||||
heapOpts: -Xmx6g -Xms6g -XX:MaxDirectMemorySize=6g -XX:MetaspaceSize=96m
|
||||
extraConfig: |
|
||||
elasticstream.enable=true
|
||||
autobalancer.client.auth.sasl.mechanism=PLAIN
|
||||
autobalancer.client.auth.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="inter_broker_user" password="interbroker-password-placeholder" user_inter_broker_user="interbroker-password-placeholder";
|
||||
autobalancer.client.auth.security.protocol=SASL_PLAINTEXT
|
||||
autobalancer.client.listener.name=INTERNAL
|
||||
s3.wal.cache.size=2147483648
|
||||
s3.block.cache.size=1073741824
|
||||
s3.stream.allocator.policy=POOLED_DIRECT
|
||||
s3.network.baseline.bandwidth=245366784
|
||||
# Replace the following with your bucket config
|
||||
s3.ops.buckets=1@s3://${ops-bucket}?region=${region}&endpoint=${endpoint}
|
||||
s3.data.buckets=0@s3://${data-bucket}?region=${region}&endpoint=${endpoint}
|
||||
s3.wal.path=0@s3://${data-bucket}?region=${region}&endpoint=${endpoint}
|
||||
automq.zonerouter.channels=0@s3://${data-bucket}?region=${region}&endpoint=${endpoint}
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app.kubernetes.io/instance
|
||||
operator: In
|
||||
# your helm release name
|
||||
values:
|
||||
- automq-release
|
||||
- key: app.kubernetes.io/component
|
||||
operator: In
|
||||
values:
|
||||
- controller-eligible
|
||||
- broker
|
||||
topologyKey: kubernetes.io/hostname
|
||||
# --- nodeAffinity recommended ---
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: "${your-node-label-key}"
|
||||
# operator: In
|
||||
# values:
|
||||
# - "${your-node-label-value}"
|
||||
topologySpreadConstraints:
|
||||
- maxSkew: 1
|
||||
topologyKey: topology.kubernetes.io/zone
|
||||
whenUnsatisfiable: DoNotSchedule
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/component: controller-eligible
|
||||
tolerations:
|
||||
- key: "dedicated"
|
||||
operator: "Equal"
|
||||
value: "automq"
|
||||
effect: "NoSchedule"
|
||||
persistence:
|
||||
size: 20Gi
|
||||
|
||||
broker:
|
||||
replicaCount: 3
|
||||
resources:
|
||||
requests:
|
||||
cpu: "3000m"
|
||||
memory: "12Gi"
|
||||
limits:
|
||||
cpu: "4000m"
|
||||
memory: "16Gi"
|
||||
heapOpts: -Xmx6g -Xms6g -XX:MaxDirectMemorySize=6g -XX:MetaspaceSize=96m
|
||||
extraConfig: |
|
||||
elasticstream.enable=true
|
||||
autobalancer.client.auth.sasl.mechanism=PLAIN
|
||||
autobalancer.client.auth.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="inter_broker_user" password="interbroker-password-placeholder" user_inter_broker_user="interbroker-password-placeholder";
|
||||
autobalancer.client.auth.security.protocol=SASL_PLAINTEXT
|
||||
autobalancer.client.listener.name=INTERNAL
|
||||
s3.wal.cache.size=2147483648
|
||||
s3.block.cache.size=1073741824
|
||||
s3.stream.allocator.policy=POOLED_DIRECT
|
||||
s3.network.baseline.bandwidth=245366784
|
||||
# Replace the following with your bucket config
|
||||
s3.ops.buckets=1@s3://${ops-bucket}?region=${region}&endpoint=${endpoint}
|
||||
s3.data.buckets=0@s3://${data-bucket}?region=${region}&endpoint=${endpoint}
|
||||
s3.wal.path=0@s3://${data-bucket}?region=${region}&endpoint=${endpoint}
|
||||
automq.zonerouter.channels=0@s3://${data-bucket}?region=${region}&endpoint=${endpoint}
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app.kubernetes.io/instance
|
||||
operator: In
|
||||
# your helm release name
|
||||
values:
|
||||
- automq-release
|
||||
- key: app.kubernetes.io/component
|
||||
operator: In
|
||||
values:
|
||||
- controller-eligible
|
||||
- broker
|
||||
topologyKey: kubernetes.io/hostname
|
||||
# --- nodeAffinity recommended ---
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: "${your-node-label-key}"
|
||||
# operator: In
|
||||
# values:
|
||||
# - "${your-node-label-value}"
|
||||
topologySpreadConstraints:
|
||||
- maxSkew: 1
|
||||
topologyKey: topology.kubernetes.io/zone
|
||||
whenUnsatisfiable: DoNotSchedule
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/component: broker
|
||||
tolerations:
|
||||
- key: "dedicated"
|
||||
operator: "Equal"
|
||||
value: "automq"
|
||||
effect: "NoSchedule"
|
||||
brokerRackAssignment: aws-az
|
||||
|
|
@ -378,6 +378,5 @@
|
|||
<suppress id="dontUseSystemExit"
|
||||
files="(BenchTool|S3Utils|AutoMQCLI).java"/>
|
||||
<suppress checks="ClassDataAbstractionCoupling" files="(StreamControlManagerTest|ControllerStreamManager).java"/>
|
||||
<suppress files="core[\/]src[\/]test[\/]java[\/]kafka[\/]automq[\/]table[\/]process[\/]proto[\/].*\.java$" checks=".*"/>
|
||||
|
||||
</suppressions>
|
||||
|
|
|
|||
|
|
@ -264,51 +264,8 @@ public class TopicConfig {
|
|||
public static final String TABLE_TOPIC_COMMIT_INTERVAL_DOC = "The table topic commit interval (ms)";
|
||||
public static final String TABLE_TOPIC_NAMESPACE_CONFIG = "automq.table.topic.namespace";
|
||||
public static final String TABLE_TOPIC_NAMESPACE_DOC = "The table topic namespace";
|
||||
|
||||
public static final String TABLE_TOPIC_SCHEMA_TYPE_CONFIG = "automq.table.topic.schema.type";
|
||||
public static final String TABLE_TOPIC_SCHEMA_TYPE_DOC = "[DEPRECATED] The table topic schema type configuration. " +
|
||||
"This configuration is deprecated and will be removed in a future release. " +
|
||||
"Please use the new separate converter and transform configurations instead. " +
|
||||
"Supported values: 'schemaless' (maps to convert.value.type=raw, transform.value.type=none), " +
|
||||
"'schema' (maps to convert.value.type=by_schema_id, transform.value.type=flatten).";
|
||||
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_VALUE_TYPE_CONFIG = "automq.table.topic.convert.value.type";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_VALUE_TYPE_DOC = "How to parse Kafka record values. " +
|
||||
"Supported: 'raw', 'string', 'by_schema_id', 'by_latest_schema'. " +
|
||||
"Schema Registry URL required for 'by_schema_id' and 'by_latest_schema'.";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_KEY_TYPE_CONFIG = "automq.table.topic.convert.key.type";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_KEY_TYPE_DOC = "How to parse Kafka record keys. " +
|
||||
"Supported: 'raw', 'string', 'by_schema_id', 'by_latest_schema'. " +
|
||||
"Schema Registry URL required for 'by_schema_id' and 'by_latest_schema'.";
|
||||
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_VALUE_BY_LATEST_SCHEMA_SUBJECT_CONFIG =
|
||||
"automq.table.topic.convert.value.by_latest_schema.subject";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_VALUE_BY_LATEST_SCHEMA_SUBJECT_DOC =
|
||||
"Subject name to resolve the latest value schema from Schema Registry when using convert.value.type=by_latest_schema. " +
|
||||
"If not set, defaults to '<topic>-value'.";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_VALUE_BY_LATEST_SCHEMA_MESSAGE_FULL_NAME_CONFIG =
|
||||
"automq.table.topic.convert.value.by_latest_schema.message.full.name";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_VALUE_BY_LATEST_SCHEMA_MESSAGE_FULL_NAME_DOC =
|
||||
"Fully-qualified message name for the latest value schema (if using Protobuf) when convert.value.type=by_latest_schema." +
|
||||
"If not set, uses the first message.";
|
||||
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_KEY_BY_LATEST_SCHEMA_SUBJECT_CONFIG =
|
||||
"automq.table.topic.convert.key.by_latest_schema.subject";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_KEY_BY_LATEST_SCHEMA_SUBJECT_DOC =
|
||||
"Subject name to resolve the latest key schema from Schema Registry when using convert.key.type=by_latest_schema. " +
|
||||
"If not set, defaults to '<topic>-key'.";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_KEY_BY_LATEST_SCHEMA_MESSAGE_FULL_NAME_CONFIG =
|
||||
"automq.table.topic.convert.key.by_latest_schema.message.full.name";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_CONVERT_KEY_BY_LATEST_SCHEMA_MESSAGE_FULL_NAME_DOC =
|
||||
"Fully-qualified message name for the latest key schema (if using Protobuf) when convert.key.type=by_latest_schema. " +
|
||||
"If not set, uses the first message.";
|
||||
|
||||
public static final String AUTOMQ_TABLE_TOPIC_TRANSFORM_VALUE_TYPE_CONFIG = "automq.table.topic.transform.value.type";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_TRANSFORM_VALUE_TYPE_DOC = "Transformation to apply to the record value after conversion. " +
|
||||
"Supported: 'none', 'flatten' (extract fields from structured records), " +
|
||||
"'flatten_debezium' (process Debezium CDC events). " +
|
||||
"Note: 'flatten_debezium' requires schema-based conversion.";
|
||||
|
||||
public static final String TABLE_TOPIC_SCHEMA_TYPE_DOC = "The table topic schema type, support schemaless, schema";
|
||||
public static final String TABLE_TOPIC_ID_COLUMNS_CONFIG = "automq.table.topic.id.columns";
|
||||
public static final String TABLE_TOPIC_ID_COLUMNS_DOC = "The primary key: a comma-separated list of columns that identify a row in the table, "
+ "e.g. [region, name]";
|
||||
|
|
@ -319,21 +276,6 @@ public class TopicConfig {
|
|||
public static final String TABLE_TOPIC_CDC_FIELD_CONFIG = "automq.table.topic.cdc.field";
|
||||
public static final String TABLE_TOPIC_CDC_FIELD_DOC = "The name of the field containing the CDC operation, I, U, or D";
|
||||
|
||||
public static final String AUTOMQ_TABLE_TOPIC_ERRORS_TOLERANCE_CONFIG = "automq.table.topic.errors.tolerance";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_ERRORS_TOLERANCE_DOC = "Configures the error handling strategy for table topic record processing. Valid values are <code>none</code>, <code>invalid_data</code>, and <code>all</code>.";
|
||||
|
||||
public static final String AUTOMQ_TABLE_TOPIC_EXPIRE_SNAPSHOT_ENABLED_CONFIG = "automq.table.topic.expire.snapshot.enabled";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_EXPIRE_SNAPSHOT_ENABLED_DOC = "Enable/disable automatic snapshot expiration.";
|
||||
public static final boolean AUTOMQ_TABLE_TOPIC_EXPIRE_SNAPSHOT_ENABLED_DEFAULT = true;
|
||||
|
||||
public static final String AUTOMQ_TABLE_TOPIC_EXPIRE_SNAPSHOT_OLDER_THAN_HOURS_CONFIG = "automq.table.topic.expire.snapshot.older.than.hours";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_EXPIRE_SNAPSHOT_OLDER_THAN_HOURS_DOC = "Set retention duration in hours.";
|
||||
public static final int AUTOMQ_TABLE_TOPIC_EXPIRE_SNAPSHOT_OLDER_THAN_HOURS_DEFAULT = 1;
|
||||
|
||||
public static final String AUTOMQ_TABLE_TOPIC_EXPIRE_SNAPSHOT_RETAIN_LAST_CONFIG = "automq.table.topic.expire.snapshot.retain.last";
|
||||
public static final String AUTOMQ_TABLE_TOPIC_EXPIRE_SNAPSHOT_RETAIN_LAST_DOC = "Minimum snapshots to retain.";
|
||||
public static final int AUTOMQ_TABLE_TOPIC_EXPIRE_SNAPSHOT_RETAIN_LAST_DEFAULT = 1;
|
||||
|
||||
public static final String KAFKA_LINKS_ID_CONFIG = "automq.kafka.links.id";
|
||||
public static final String KAFKA_LINKS_ID_DOC = "The unique id of a kafka link";
|
||||
public static final String KAFKA_LINKS_TOPIC_START_TIME_CONFIG = "automq.kafka.links.topic.start.time";
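// A hedged usage sketch (not part of this change set): applying the converter/transform configs
// documented above to a topic via the standard Kafka Admin API. The topic name "orders" and the
// bootstrap address are illustrative assumptions.
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;

public class TableTopicConfigExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders"); // hypothetical topic
            Collection<AlterConfigOp> ops = List.of(
                // Parse record values by the schema id embedded in the payload...
                new AlterConfigOp(new ConfigEntry("automq.table.topic.convert.value.type", "by_schema_id"), AlterConfigOp.OpType.SET),
                // ...then flatten the structured record into table columns.
                new AlterConfigOp(new ConfigEntry("automq.table.topic.transform.value.type", "flatten"), AlterConfigOp.OpType.SET),
                // Primary key columns, using the documented "[region, name]" format.
                new AlterConfigOp(new ConfigEntry("automq.table.topic.id.columns", "[region, name]"), AlterConfigOp.OpType.SET));
            admin.incrementalAlterConfigs(Map.of(topic, ops)).all().get();
        }
    }
}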
|
||||
|
|
|
|||
|
|
@ -39,22 +39,6 @@ public enum TimestampType {
|
|||
throw new NoSuchElementException("Invalid timestamp type " + name);
|
||||
}
|
||||
|
||||
public static TimestampType forId(int id) {
|
||||
switch (id) {
|
||||
case -1: {
|
||||
return NO_TIMESTAMP_TYPE;
|
||||
}
|
||||
case 0: {
|
||||
return CREATE_TIME;
|
||||
}
|
||||
case 1: {
|
||||
return LOG_APPEND_TIME;
|
||||
}
|
||||
default:
|
||||
throw new IllegalArgumentException("Invalid timestamp type " + id);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return name;
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@
|
|||
"broker"
|
||||
],
|
||||
"name": "AutomqGetPartitionSnapshotRequest",
|
||||
"validVersions": "0-2",
|
||||
"validVersions": "0",
|
||||
"flexibleVersions": "0+",
|
||||
"fields": [
|
||||
{
|
||||
|
|
@ -34,18 +34,6 @@
|
|||
"type": "int32",
|
||||
"versions": "0+",
|
||||
"about": "The get session epoch, which is used for ordering requests in a session"
|
||||
},
|
||||
{
|
||||
"name": "RequestCommit",
|
||||
"type": "bool",
|
||||
"versions": "1+",
|
||||
"about": "Request commit the ConfirmWAL data to the main storage."
|
||||
},
|
||||
{
|
||||
"name": "Version",
|
||||
"type": "int16",
|
||||
"versions": "1+",
|
||||
"about": "The route request version"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -17,7 +17,7 @@
|
|||
"apiKey": 516,
|
||||
"type": "response",
|
||||
"name": "AutomqGetPartitionSnapshotResponse",
|
||||
"validVersions": "0-2",
|
||||
"validVersions": "0",
|
||||
"flexibleVersions": "0+",
|
||||
"fields": [
|
||||
{ "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The top level response error code" },
|
||||
|
|
@ -36,29 +36,9 @@
|
|||
{ "name": "StreamMetadata", "type": "[]StreamMetadata", "versions": "0+", "nullableVersions": "0+", "fields": [
|
||||
{ "name": "StreamId", "type": "int64", "versions": "0+", "about": "The streamId" },
|
||||
{ "name": "EndOffset", "type": "int64", "versions": "0+", "about": "The stream end offset" }
|
||||
]},
|
||||
{ "name": "LastTimestampOffset", "type": "TimestampOffsetData", "versions": "1+", "nullableVersions": "1+", "about": "The last segment's last time index" }
|
||||
]}
|
||||
]}
|
||||
]},
|
||||
{
|
||||
"name": "ConfirmWalEndOffset",
|
||||
"type": "bytes",
|
||||
"versions": "1+",
|
||||
"about": "The confirm WAL end offset."
|
||||
},
|
||||
{
|
||||
"name": "ConfirmWalConfig",
|
||||
"type": "string",
|
||||
"versions": "1+",
|
||||
"about": "The confirm WAL config."
|
||||
},
|
||||
{
|
||||
"name": "ConfirmWalDeltaData",
|
||||
"type": "bytes",
|
||||
"versions": "2+",
|
||||
"nullableVersions": "2+",
|
||||
"about": "The confirm WAL delta data between two end offsets. It's an optional field. If not present, the client should read the delta from WAL"
|
||||
}
|
||||
]}
|
||||
],
|
||||
"commonStructs": [
|
||||
{ "name": "LogMetadata", "versions": "0+", "fields": [
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@
|
|||
"broker"
|
||||
],
|
||||
"name": "AutomqZoneRouterRequest",
|
||||
"validVersions": "0-1",
|
||||
"validVersions": "0",
|
||||
"flexibleVersions": "0+",
|
||||
"fields": [
|
||||
{
|
||||
|
|
@ -28,18 +28,6 @@
|
|||
"type": "bytes",
|
||||
"versions": "0+",
|
||||
"about": "The router metadata"
|
||||
},
|
||||
{
|
||||
"name": "RouteEpoch",
|
||||
"type": "int64",
|
||||
"versions": "1+",
|
||||
"about": "The route requests epoch"
|
||||
},
|
||||
{
|
||||
"name": "Version",
|
||||
"type": "int16",
|
||||
"versions": "1+",
|
||||
"about": "The route request version"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -17,7 +17,7 @@
|
|||
"apiKey": 515,
|
||||
"type": "response",
|
||||
"name": "AutomqZoneRouterResponse",
|
||||
"validVersions": "0-1",
|
||||
"validVersions": "0",
|
||||
"flexibleVersions": "0+",
|
||||
"fields": [
|
||||
{
|
||||
|
|
|
|||
|
|
@ -24,8 +24,7 @@ log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
|||
# location of the log files (e.g. ${kafka.logs.dir}/connect.log). The `MaxFileSize` option specifies the maximum size of the log file,
|
||||
# and the `MaxBackupIndex` option specifies the number of backup files to keep.
|
||||
#
|
||||
log4j.appender.connectAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.connectAppender.configProviderClass=org.apache.kafka.connect.automq.log.ConnectS3LogConfigProvider
|
||||
log4j.appender.connectAppender=org.apache.log4j.RollingFileAppender
|
||||
log4j.appender.connectAppender.MaxFileSize=10MB
|
||||
log4j.appender.connectAppender.MaxBackupIndex=11
|
||||
log4j.appender.connectAppender.File=${kafka.logs.dir}/connect.log
|
||||
|
|
|
|||
|
|
@ -143,8 +143,8 @@ s3.data.buckets=0@s3://ko3?region=us-east-1
|
|||
# The ops buckets
|
||||
s3.ops.buckets=0@s3://ko3?region=us-east-1
|
||||
|
||||
# The wal storage config
|
||||
s3.wal.path=0@s3://ko3?region=us-east-1
|
||||
# The file path of delta WAL in block device
|
||||
s3.wal.path=0@file:///tmp/kraft-broker-logs/s3wal?capacity=2147483648
|
||||
|
||||
# The maximum size of WAL cache can use, default 2GB
|
||||
# s3.wal.cache.size=2147483648
|
||||
|
|
|
|||
|
|
@ -146,8 +146,8 @@ s3.data.buckets=0@s3://ko3?region=us-east-1
|
|||
# The ops buckets
|
||||
s3.ops.buckets=0@s3://ko3?region=us-east-1
|
||||
|
||||
# The wal storage config
|
||||
s3.wal.path=0@s3://ko3?region=us-east-1
|
||||
# The file path of delta WAL in block device
|
||||
s3.wal.path=0@file:///tmp/kraft-broker-logs/s3wal?capacity=2147483648
|
||||
|
||||
# The maximum size of WAL cache can use, default 2GB
|
||||
# s3.wal.cache.size=2147483648
|
||||
|
|
|
|||
|
|
@ -21,73 +21,70 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
|||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.logger.com.automq.log.S3RollingFileAppender=INFO, stdout
|
||||
log4j.additivity.com.automq.log.S3RollingFileAppender=false
|
||||
|
||||
log4j.appender.kafkaAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.kafkaAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.kafkaAppender.MaxFileSize=100MB
|
||||
log4j.appender.kafkaAppender.MaxBackupIndex=14
|
||||
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
|
||||
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.appender.stateChangeAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.stateChangeAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.stateChangeAppender.MaxFileSize=10MB
|
||||
log4j.appender.stateChangeAppender.MaxBackupIndex=11
|
||||
log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
|
||||
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.appender.requestAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.requestAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.requestAppender.MaxFileSize=10MB
|
||||
log4j.appender.requestAppender.MaxBackupIndex=11
|
||||
log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
|
||||
log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.appender.cleanerAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.cleanerAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.cleanerAppender.MaxFileSize=10MB
|
||||
log4j.appender.cleanerAppender.MaxBackupIndex=11
|
||||
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
|
||||
log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.appender.controllerAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.controllerAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.controllerAppender.MaxFileSize=100MB
|
||||
log4j.appender.controllerAppender.MaxBackupIndex=14
|
||||
log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
|
||||
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.appender.authorizerAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.authorizerAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.authorizerAppender.MaxFileSize=10MB
|
||||
log4j.appender.authorizerAppender.MaxBackupIndex=11
|
||||
log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
|
||||
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.appender.s3ObjectAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.s3ObjectAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.s3ObjectAppender.MaxFileSize=100MB
|
||||
log4j.appender.s3ObjectAppender.MaxBackupIndex=14
|
||||
log4j.appender.s3ObjectAppender.File=${kafka.logs.dir}/s3-object.log
|
||||
log4j.appender.s3ObjectAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.s3ObjectAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.appender.s3StreamMetricsAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.s3StreamMetricsAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.s3StreamMetricsAppender.MaxFileSize=10MB
|
||||
log4j.appender.s3StreamMetricsAppender.MaxBackupIndex=11
|
||||
log4j.appender.s3StreamMetricsAppender.File=${kafka.logs.dir}/s3stream-metrics.log
|
||||
log4j.appender.s3StreamMetricsAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.s3StreamMetricsAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.appender.s3StreamThreadPoolAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.s3StreamThreadPoolAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.s3StreamThreadPoolAppender.MaxFileSize=10MB
|
||||
log4j.appender.s3StreamThreadPoolAppender.MaxBackupIndex=11
|
||||
log4j.appender.s3StreamThreadPoolAppender.File=${kafka.logs.dir}/s3stream-threads.log
|
||||
log4j.appender.s3StreamThreadPoolAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.s3StreamThreadPoolAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
|
||||
|
||||
log4j.appender.autoBalancerAppender=com.automq.log.S3RollingFileAppender
|
||||
log4j.appender.autoBalancerAppender=com.automq.shell.log.S3RollingFileAppender
|
||||
log4j.appender.autoBalancerAppender.MaxFileSize=10MB
|
||||
log4j.appender.autoBalancerAppender.MaxBackupIndex=11
|
||||
log4j.appender.autoBalancerAppender.File=${kafka.logs.dir}/auto-balancer.log
|
||||
|
|
@ -148,6 +145,3 @@ log4j.additivity.kafka.authorizer.logger=false
|
|||
|
||||
log4j.logger.kafka.autobalancer=INFO, autoBalancerAppender
|
||||
log4j.additivity.kafka.autobalancer=false
|
||||
|
||||
log4j.logger.org.apache.kafka.clients=INFO, kafkaAppender
|
||||
log4j.additivity.org.apache.kafka.clients=false
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
log4j.rootLogger=ERROR, stdout, perfAppender
|
||||
log4j.rootLogger=INFO, stdout, perfAppender
|
||||
|
||||
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||
|
|
@ -26,15 +26,7 @@ log4j.appender.perfAppender.File=${kafka.logs.dir}/perf.log
|
|||
log4j.appender.perfAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.perfAppender.layout.ConversionPattern=%d -%5p [%15.15t] %m (%c#%M:%L)%n
|
||||
|
||||
log4j.appender.clientAppender=org.apache.log4j.RollingFileAppender
|
||||
log4j.appender.clientAppender.MaxFileSize=100MB
|
||||
log4j.appender.clientAppender.MaxBackupIndex=10
|
||||
log4j.appender.clientAppender.File=${kafka.logs.dir}/client.log
|
||||
log4j.appender.clientAppender.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.clientAppender.layout.ConversionPattern=%d -%5p [%15.15t] %m (%c#%M:%L)%n
|
||||
log4j.logger.org.apache.kafka=INFO, perfAppender
|
||||
log4j.additivity.org.apache.kafka=false
|
||||
|
||||
log4j.logger.org.apache.kafka.tools.automq=INFO, stdout, perfAppender
|
||||
log4j.additivity.org.apache.kafka.tools.automq=false
|
||||
|
||||
log4j.logger.org.apache.kafka.clients=INFO, clientAppender
|
||||
log4j.additivity.org.apache.kafka.clients=false
|
||||
|
|
|
|||
|
|
@ -1,221 +0,0 @@
|
|||
# Kafka Connect OpenTelemetry Metrics Integration
|
||||
|
||||
## Overview
|
||||
|
||||
This integration allows Kafka Connect to export metrics through the AutoMQ OpenTelemetry module, enabling unified observability across your Kafka ecosystem.
|
||||
|
||||
## Configuration
|
||||
|
||||
### 1. Enable the MetricsReporter
|
||||
|
||||
Add the following to your Kafka Connect configuration file (`connect-distributed.properties` or `connect-standalone.properties`):
|
||||
|
||||
```properties
|
||||
# Enable OpenTelemetry MetricsReporter
|
||||
metric.reporters=org.apache.kafka.connect.automq.metrics.OpenTelemetryMetricsReporter
|
||||
|
||||
# OpenTelemetry configuration
|
||||
opentelemetry.metrics.enabled=true
|
||||
opentelemetry.metrics.prefix=kafka.connect
|
||||
|
||||
# Optional: Filter metrics
|
||||
opentelemetry.metrics.include.pattern=.*connector.*|.*task.*|.*worker.*
|
||||
opentelemetry.metrics.exclude.pattern=.*jmx.*|.*debug.*
|
||||
```
|
||||
|
||||
### 2. AutoMQ Telemetry Configuration
|
||||
|
||||
Ensure the AutoMQ telemetry is properly configured. Add these properties to your application configuration:
|
||||
|
||||
```properties
|
||||
# Telemetry export configuration
|
||||
automq.telemetry.exporter.uri=prometheus://localhost:9090
|
||||
# or for OTLP: automq.telemetry.exporter.uri=otlp://localhost:4317
|
||||
|
||||
# Service identification
|
||||
service.name=kafka-connect
|
||||
service.instance.id=connect-worker-1
|
||||
|
||||
# Export settings
|
||||
automq.telemetry.exporter.interval.ms=30000
|
||||
automq.telemetry.metric.cardinality.limit=10000
|
||||
```
|
||||
|
||||
## S3 Log Upload
|
||||
|
||||
Kafka Connect bundles the AutoMQ log uploader so that worker logs can be streamed to S3 together with in-cluster cleanup. The uploader uses the connect-leader election mechanism by default and requires no additional configuration.
|
||||
|
||||
### Worker Configuration
|
||||
|
||||
Add the following properties to your worker configuration (ConfigMap, properties file, etc.):
|
||||
|
||||
```properties
|
||||
# Enable S3 log upload
|
||||
log.s3.enable=true
|
||||
log.s3.bucket=0@s3://your-log-bucket?region=us-east-1
|
||||
|
||||
# Optional overrides (defaults shown)
|
||||
log.s3.selector.type=connect-leader
|
||||
# Provide credentials if the bucket URI does not embed them
|
||||
# log.s3.access.key=...
|
||||
# log.s3.secret.key=...
|
||||
```
|
||||
|
||||
`log.s3.node.id` defaults to a hash of the pod hostname if not provided, ensuring objects are partitioned per worker.
|
||||
|
||||
### Log4j Integration
|
||||
|
||||
`config/connect-log4j.properties` now uses `com.automq.log.S3RollingFileAppender` for `connectAppender` and specifies `org.apache.kafka.connect.automq.log.ConnectS3LogConfigProvider` as the config provider. As long as `log.s3.enable=true` is set and the bucket info is configured in the worker config, log upload is initialized automatically with the Connect process; if the property is unset or resolves to `log.s3.enable=false`, the uploader remains disabled.
|
||||
|
||||
## Programmatic Usage
|
||||
|
||||
### 1. Initialize Telemetry Manager
|
||||
|
||||
```java
|
||||
import com.automq.opentelemetry.AutoMQTelemetryManager;
|
||||
import java.util.Properties;
|
||||
|
||||
// Initialize AutoMQ telemetry before starting Kafka Connect
|
||||
Properties telemetryProps = new Properties();
|
||||
telemetryProps.setProperty("automq.telemetry.exporter.uri", "prometheus://localhost:9090");
|
||||
telemetryProps.setProperty("service.name", "kafka-connect");
|
||||
telemetryProps.setProperty("service.instance.id", "worker-1");
|
||||
|
||||
// Initialize singleton instance
|
||||
AutoMQTelemetryManager.initializeInstance(telemetryProps);
|
||||
|
||||
// Now start Kafka Connect - it will automatically use the OpenTelemetryMetricsReporter
|
||||
```
|
||||
|
||||
### 2. Shutdown
|
||||
|
||||
```java
|
||||
// When shutting down your application
|
||||
AutoMQTelemetryManager.shutdownInstance();
|
||||
```
|
||||
|
||||
## Exported Metrics
|
||||
|
||||
The integration automatically converts Kafka Connect metrics to OpenTelemetry format:
|
||||
|
||||
### Metric Naming Convention
|
||||
- **Format**: `kafka.connect.{group}.{metric_name}`
|
||||
- **Example**: `kafka.connect.connector.task.batch.size.avg` → `kafka.connect.connector_task_batch_size_avg`
|
||||
|
||||
### Metric Types
|
||||
- **Counters**: Metrics containing "total", "count", "error", "failure"
|
||||
- **Gauges**: All other numeric metrics (rates, averages, sizes, etc.)
|
||||
|
||||
### Attributes
|
||||
Kafka metric tags are converted to OpenTelemetry attributes:
|
||||
- `connector` → `connector`
|
||||
- `task` → `task`
|
||||
- `worker-id` → `worker_id`
|
||||
- Plus standard attributes: `metric.group`, `service.name`, `service.instance.id`
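
The sketch below illustrates these rules; it is an assumption rather than the reporter's actual implementation (the exact flattening of group and name is not spelled out above). Group and metric name are lower-cased and flattened with underscores under the configured prefix, and metrics whose names contain "total", "count", "error", or "failure" are exported as counters, everything else as gauges.

```java
import org.apache.kafka.common.MetricName;

public final class OtelNameMapper {
    private static final String PREFIX = "kafka.connect"; // opentelemetry.metrics.prefix

    // e.g. a metric in group "connector.task" named "batch.size.avg"
    // maps to "kafka.connect.connector_task_batch_size_avg"
    static String otelName(MetricName metric) {
        String flat = (metric.group() + "_" + metric.name())
            .toLowerCase()
            .replaceAll("[^a-z0-9]", "_");
        return PREFIX + "." + flat;
    }

    // Counter vs. gauge heuristic from the "Metric Types" list above.
    static boolean isCounter(MetricName metric) {
        String name = metric.name().toLowerCase();
        return name.contains("total") || name.contains("count")
            || name.contains("error") || name.contains("failure");
    }
}
```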
|
||||
|
||||
## Example Metrics
|
||||
|
||||
Common Kafka Connect metrics that will be exported:
|
||||
|
||||
```
|
||||
# Connector metrics
|
||||
kafka.connect.connector.startup.attempts.total
|
||||
kafka.connect.connector.startup.success.total
|
||||
kafka.connect.connector.startup.failure.total
|
||||
|
||||
# Task metrics
|
||||
kafka.connect.connector.task.batch.size.avg
|
||||
kafka.connect.connector.task.batch.size.max
|
||||
kafka.connect.connector.task.offset.commit.avg.time.ms
|
||||
|
||||
# Worker metrics
|
||||
kafka.connect.worker.connector.count
|
||||
kafka.connect.worker.task.count
|
||||
kafka.connect.worker.connector.startup.attempts.total
|
||||
```
|
||||
|
||||
## Configuration Options
|
||||
|
||||
### OpenTelemetry MetricsReporter Options
|
||||
|
||||
| Property | Description | Default | Example |
|
||||
|----------|-------------|---------|---------|
|
||||
| `opentelemetry.metrics.enabled` | Enable/disable metrics export | `true` | `false` |
|
||||
| `opentelemetry.metrics.prefix` | Metric name prefix | `kafka.connect` | `my.connect` |
|
||||
| `opentelemetry.metrics.include.pattern` | Regex for included metrics | All metrics | `.*connector.*` |
|
||||
| `opentelemetry.metrics.exclude.pattern` | Regex for excluded metrics | None | `.*jmx.*` |
|
||||
|
||||
### AutoMQ Telemetry Options
|
||||
|
||||
| Property | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `automq.telemetry.exporter.uri` | Exporter endpoint | Empty |
|
||||
| `automq.telemetry.exporter.interval.ms` | Export interval | `60000` |
|
||||
| `automq.telemetry.metric.cardinality.limit` | Max metric cardinality | `20000` |
|
||||
|
||||
## Monitoring Examples
|
||||
|
||||
### Prometheus Queries
|
||||
|
||||
```promql
|
||||
# Connector count by worker
|
||||
kafka_connect_worker_connector_count
|
||||
|
||||
# Task failure rate
|
||||
rate(kafka_connect_connector_task_startup_failure_total[5m])
|
||||
|
||||
# Average batch processing time
|
||||
kafka_connect_connector_task_batch_size_avg
|
||||
|
||||
# Connector startup success rate
|
||||
rate(kafka_connect_connector_startup_success_total[5m]) /
|
||||
rate(kafka_connect_connector_startup_attempts_total[5m])
|
||||
```
|
||||
|
||||
### Grafana Dashboard
|
||||
|
||||
Common panels to create:
|
||||
|
||||
1. **Connector Health**: Count of running/failed connectors
|
||||
2. **Task Performance**: Batch size, processing time, throughput
|
||||
3. **Error Rates**: Failed startups, task failures
|
||||
4. **Resource Usage**: Combined with JVM metrics from AutoMQ telemetry
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Metrics not appearing**
|
||||
```
|
||||
Check logs for: "AutoMQTelemetryManager is not initialized"
|
||||
Solution: Ensure AutoMQTelemetryManager.initializeInstance() is called before Connect starts
|
||||
```
|
||||
|
||||
2. **High cardinality warnings**
|
||||
```
|
||||
Solution: Use include/exclude patterns to filter metrics
|
||||
```
|
||||
|
||||
3. **Missing dependencies**
|
||||
```
|
||||
Ensure connect-runtime depends on the opentelemetry module
|
||||
```
|
||||
|
||||
### Debug Logging
|
||||
|
||||
Enable debug logging to troubleshoot:
|
||||
|
||||
```properties
|
||||
log4j.logger.org.apache.kafka.connect.automq=DEBUG
|
||||
log4j.logger.com.automq.opentelemetry=DEBUG
|
||||
```
|
||||
|
||||
## Integration with Existing Monitoring
|
||||
|
||||
This integration works alongside:
|
||||
- Existing JMX metrics (not replaced)
|
||||
- Kafka broker metrics via AutoMQ telemetry
|
||||
- Application-specific metrics
|
||||
- Third-party monitoring tools
|
||||
|
||||
The OpenTelemetry integration provides a unified export path while preserving existing monitoring setups.
|
||||
|
|
@ -1,95 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.automq.az;
|
||||
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.net.URLEncoder;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
|
||||
public final class AzAwareClientConfigurator {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(AzAwareClientConfigurator.class);
|
||||
|
||||
private AzAwareClientConfigurator() {
|
||||
}
|
||||
|
||||
public enum ClientFamily {
|
||||
PRODUCER,
|
||||
CONSUMER,
|
||||
ADMIN
|
||||
}
|
||||
|
||||
public static void maybeApplyAz(Map<String, Object> props, ClientFamily family, String roleDescriptor) {
|
||||
Optional<String> azOpt = AzMetadataProviderHolder.provider().availabilityZoneId();
|
||||
LOGGER.info("AZ-aware client.id configuration for role {}: resolved availability zone id '{}'",
|
||||
roleDescriptor, azOpt.orElse("unknown"));
|
||||
if (azOpt.isEmpty()) {
|
||||
LOGGER.info("Skipping AZ-aware client.id configuration for role {} as no availability zone id is available",
|
||||
roleDescriptor);
|
||||
return;
|
||||
}
|
||||
|
||||
String az = azOpt.get();
|
||||
|
||||
String encodedAz = URLEncoder.encode(az, StandardCharsets.UTF_8);
|
||||
String automqClientId;
|
||||
|
||||
if (props.containsKey(CommonClientConfigs.CLIENT_ID_CONFIG)) {
|
||||
Object currentId = props.get(CommonClientConfigs.CLIENT_ID_CONFIG);
|
||||
if (currentId instanceof String currentIdStr) {
|
||||
automqClientId = "automq_az=" + encodedAz + "&" + currentIdStr;
|
||||
} else {
|
||||
LOGGER.warn("client.id for role {} is not a string ({});",
|
||||
roleDescriptor, currentId.getClass().getName());
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
automqClientId = "automq_az=" + encodedAz;
|
||||
}
|
||||
props.put(CommonClientConfigs.CLIENT_ID_CONFIG, automqClientId);
|
||||
LOGGER.info("Applied AZ-aware client.id for role {} -> {}", roleDescriptor, automqClientId);
|
||||
|
||||
if (family == ClientFamily.CONSUMER) {
|
||||
LOGGER.info("Applying client.rack configuration for consumer role {} -> {}", roleDescriptor, az);
|
||||
Object rackValue = props.get(ConsumerConfig.CLIENT_RACK_CONFIG);
|
||||
if (rackValue == null || String.valueOf(rackValue).isBlank()) {
|
||||
props.put(ConsumerConfig.CLIENT_RACK_CONFIG, az);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static void maybeApplyProducerAz(Map<String, Object> props, String roleDescriptor) {
|
||||
maybeApplyAz(props, ClientFamily.PRODUCER, roleDescriptor);
|
||||
}
|
||||
|
||||
public static void maybeApplyConsumerAz(Map<String, Object> props, String roleDescriptor) {
|
||||
maybeApplyAz(props, ClientFamily.CONSUMER, roleDescriptor);
|
||||
}
|
||||
|
||||
public static void maybeApplyAdminAz(Map<String, Object> props, String roleDescriptor) {
|
||||
maybeApplyAz(props, ClientFamily.ADMIN, roleDescriptor);
|
||||
}
|
||||
}
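
// A hypothetical usage sketch (not part of this change set) showing how the helper above would be
// applied to consumer properties; the client id and role name are illustrative assumptions.
class AzAwareClientConfiguratorExample {
    static java.util.Map<String, Object> consumerProps() {
        java.util.Map<String, Object> props = new java.util.HashMap<>();
        props.put(org.apache.kafka.clients.CommonClientConfigs.CLIENT_ID_CONFIG, "sink-task-0");
        // Prepends "automq_az=<zone-id>&" to client.id and fills client.rack for consumers when a
        // zone id can be resolved; otherwise the properties are left unchanged.
        AzAwareClientConfigurator.maybeApplyConsumerAz(props, "sink-connector");
        return props;
    }
}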
|
||||
|
|
@ -1,44 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.automq.az;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
|
||||
/**
|
||||
* Pluggable provider for availability-zone metadata used to tune Kafka client configurations.
|
||||
*/
|
||||
public interface AzMetadataProvider {
|
||||
|
||||
/**
|
||||
* Configure the provider with the worker properties. Implementations may cache values extracted from the
|
||||
* configuration map. This method is invoked exactly once during worker bootstrap.
|
||||
*/
|
||||
default void configure(Map<String, String> workerProps) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the availability-zone identifier for the current node, if known.
|
||||
*/
|
||||
default Optional<String> availabilityZoneId() {
|
||||
return Optional.empty();
|
||||
}
|
||||
}
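
// A hypothetical example implementation (an assumption, not shipped with this change set): it reads
// the zone id from a worker property or an environment variable, and would be registered for
// ServiceLoader discovery via META-INF/services/org.apache.kafka.connect.automq.az.AzMetadataProvider.
class EnvAzMetadataProvider implements AzMetadataProvider {
    private volatile String zoneId;

    @Override
    public void configure(Map<String, String> workerProps) {
        // Both the property key and the environment variable name are illustrative assumptions.
        String fromProps = workerProps.get("automq.az.id");
        this.zoneId = fromProps != null ? fromProps : System.getenv("AUTOMQ_AZ_ID");
    }

    @Override
    public Optional<String> availabilityZoneId() {
        return Optional.ofNullable(zoneId).filter(id -> !id.isBlank());
    }
}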
|
||||
|
|
@ -1,64 +0,0 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.automq.az;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;
import java.util.ServiceLoader;

public final class AzMetadataProviderHolder {
    private static final Logger LOGGER = LoggerFactory.getLogger(AzMetadataProviderHolder.class);
    private static final AzMetadataProvider DEFAULT_PROVIDER = new AzMetadataProvider() { };

    private static volatile AzMetadataProvider provider = DEFAULT_PROVIDER;

    private AzMetadataProviderHolder() {
    }

    public static void initialize(Map<String, String> workerProps) {
        AzMetadataProvider selected = DEFAULT_PROVIDER;
        try {
            ServiceLoader<AzMetadataProvider> loader = ServiceLoader.load(AzMetadataProvider.class);
            for (AzMetadataProvider candidate : loader) {
                try {
                    candidate.configure(workerProps);
                    selected = candidate;
                    LOGGER.info("Loaded AZ metadata provider: {}", candidate.getClass().getName());
                    break;
                } catch (Exception e) {
                    LOGGER.warn("Failed to initialize AZ metadata provider: {}", candidate.getClass().getName(), e);
                }
            }
        } catch (Throwable t) {
            LOGGER.warn("Failed to load AZ metadata providers", t);
        }
        provider = selected;
    }

    public static AzMetadataProvider provider() {
        return provider;
    }

    public static void setProviderForTest(AzMetadataProvider newProvider) {
        provider = newProvider != null ? newProvider : DEFAULT_PROVIDER;
    }
}

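Because the holder discovers implementations through ServiceLoader, a provider such as the sketch above would normally be advertised via a META-INF/services/org.apache.kafka.connect.automq.az.AzMetadataProvider file; the worker then only calls the holder once during bootstrap. A minimal usage sketch follows; the property key and value are placeholders, not real configuration from this change.

import org.apache.kafka.connect.automq.az.AzMetadataProviderHolder;

import java.util.Map;

public class AzBootstrapExample {
    public static void main(String[] args) {
        // In a real worker these properties come from the Connect worker config file.
        Map<String, String> workerProps = Map.of("automq.az.id", "use1-az1"); // assumed key/value
        AzMetadataProviderHolder.initialize(workerProps);
        AzMetadataProviderHolder.provider().availabilityZoneId()
            .ifPresent(az -> System.out.println("Detected AZ: " + az));
    }
}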
@ -1,56 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.automq.log;
|
||||
|
||||
import com.automq.log.S3RollingFileAppender;
|
||||
import com.automq.log.uploader.S3LogConfig;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
|
||||
/**
|
||||
* Initializes the AutoMQ S3 log uploader for Kafka Connect.
|
||||
*/
|
||||
public final class ConnectLogUploader {
|
||||
private static Logger getLogger() {
|
||||
return LoggerFactory.getLogger(ConnectLogUploader.class);
|
||||
}
|
||||
|
||||
private ConnectLogUploader() {
|
||||
}
|
||||
|
||||
public static void initialize(Map<String, String> workerProps) {
|
||||
Properties props = new Properties();
|
||||
if (workerProps != null) {
|
||||
workerProps.forEach((k, v) -> {
|
||||
if (k != null && v != null) {
|
||||
props.put(k, v);
|
||||
}
|
||||
});
|
||||
}
|
||||
ConnectS3LogConfigProvider.initialize(props);
|
||||
S3LogConfig s3LogConfig = new ConnectS3LogConfigProvider().get();
|
||||
S3RollingFileAppender.setup(s3LogConfig);
|
||||
getLogger().info("Initialized Connect S3 log uploader context");
|
||||
}
|
||||
}
|
||||
|
|
@ -1,95 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.automq.log;
|
||||
|
||||
import org.apache.kafka.connect.automq.runtime.LeaderNodeSelector;
|
||||
import org.apache.kafka.connect.automq.runtime.RuntimeLeaderSelectorProvider;
|
||||
|
||||
import com.automq.log.uploader.S3LogConfig;
|
||||
import com.automq.stream.s3.operator.BucketURI;
|
||||
import com.automq.stream.s3.operator.ObjectStorage;
|
||||
import com.automq.stream.s3.operator.ObjectStorageFactory;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class ConnectS3LogConfig implements S3LogConfig {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(ConnectS3LogConfig.class);
|
||||
|
||||
private final boolean enable;
|
||||
private final String clusterId;
|
||||
private final int nodeId;
|
||||
private final String bucketURI;
|
||||
private ObjectStorage objectStorage;
|
||||
private LeaderNodeSelector leaderNodeSelector;
|
||||
|
||||
|
||||
public ConnectS3LogConfig(boolean enable, String clusterId, int nodeId, String bucketURI) {
|
||||
this.enable = enable;
|
||||
this.clusterId = clusterId;
|
||||
this.nodeId = nodeId;
|
||||
this.bucketURI = bucketURI;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isEnabled() {
|
||||
return this.enable;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String clusterId() {
|
||||
return this.clusterId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int nodeId() {
|
||||
return this.nodeId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized ObjectStorage objectStorage() {
|
||||
if (this.objectStorage != null) {
|
||||
return this.objectStorage;
|
||||
}
|
||||
if (StringUtils.isBlank(bucketURI)) {
|
||||
LOGGER.error("Mandatory log config bucketURI is not set.");
|
||||
return null;
|
||||
}
|
||||
|
||||
String normalizedBucket = bucketURI.trim();
|
||||
BucketURI logBucket = BucketURI.parse(normalizedBucket);
|
||||
this.objectStorage = ObjectStorageFactory.instance().builder(logBucket).threadPrefix("s3-log-uploader").build();
|
||||
return this.objectStorage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isLeader() {
|
||||
LeaderNodeSelector selector = leaderSelector();
|
||||
return selector != null && selector.isLeader();
|
||||
}
|
||||
|
||||
public LeaderNodeSelector leaderSelector() {
|
||||
if (leaderNodeSelector == null) {
|
||||
this.leaderNodeSelector = new RuntimeLeaderSelectorProvider().createSelector();
|
||||
}
|
||||
return leaderNodeSelector;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,112 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.automq.log;
|
||||
|
||||
import com.automq.log.uploader.S3LogConfig;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.net.InetAddress;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
/**
|
||||
* Provides S3 log uploader configuration for Kafka Connect workers.
|
||||
*/
|
||||
public class ConnectS3LogConfigProvider {
|
||||
private static Logger getLogger() {
|
||||
return LoggerFactory.getLogger(ConnectS3LogConfigProvider.class);
|
||||
}
|
||||
private static final AtomicReference<Properties> CONFIG = new AtomicReference<>();
|
||||
private static final long WAIT_TIMEOUT_MS = TimeUnit.SECONDS.toMillis(10);
|
||||
private static final CountDownLatch INIT = new CountDownLatch(1);
|
||||
|
||||
public static void initialize(Properties workerProps) {
|
||||
try {
|
||||
if (workerProps == null) {
|
||||
CONFIG.set(null);
|
||||
return;
|
||||
}
|
||||
Properties copy = new Properties();
|
||||
for (Map.Entry<Object, Object> entry : workerProps.entrySet()) {
|
||||
if (entry.getKey() != null && entry.getValue() != null) {
|
||||
copy.put(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
CONFIG.set(copy);
|
||||
} finally {
|
||||
INIT.countDown();
|
||||
}
|
||||
getLogger().info("Initializing ConnectS3LogConfigProvider");
|
||||
}
|
||||
|
||||
public S3LogConfig get() {
|
||||
|
||||
try {
|
||||
if (!INIT.await(WAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS)) {
|
||||
getLogger().warn("S3 log uploader config not initialized within timeout; uploader disabled.");
|
||||
}
|
||||
} catch (InterruptedException ie) {
|
||||
Thread.currentThread().interrupt();
|
||||
getLogger().warn("Interrupted while waiting for S3 log uploader config; uploader disabled.");
|
||||
return null;
|
||||
}
|
||||
|
||||
Properties source = CONFIG.get();
|
||||
if (source == null) {
|
||||
getLogger().warn("S3 log upload configuration was not provided; uploader disabled.");
|
||||
return null;
|
||||
}
|
||||
|
||||
String bucketURI = source.getProperty(LogConfigConstants.LOG_S3_BUCKET_KEY);
|
||||
String clusterId = source.getProperty(LogConfigConstants.LOG_S3_CLUSTER_ID_KEY);
|
||||
String nodeIdStr = resolveNodeId(source);
|
||||
boolean enable = Boolean.parseBoolean(source.getProperty(LogConfigConstants.LOG_S3_ENABLE_KEY, "false"));
|
||||
return new ConnectS3LogConfig(enable, clusterId, Integer.parseInt(nodeIdStr), bucketURI);
|
||||
}
|
||||
|
||||
private String resolveNodeId(Properties workerProps) {
|
||||
String fromConfig = workerProps.getProperty(LogConfigConstants.LOG_S3_NODE_ID_KEY);
|
||||
if (!isBlank(fromConfig)) {
|
||||
return fromConfig.trim();
|
||||
}
|
||||
String env = System.getenv("CONNECT_NODE_ID");
|
||||
if (!isBlank(env)) {
|
||||
return env.trim();
|
||||
}
|
||||
String host = workerProps.getProperty("automq.log.s3.node.hostname");
|
||||
if (isBlank(host)) {
|
||||
try {
|
||||
host = InetAddress.getLocalHost().getHostName();
|
||||
} catch (Exception e) {
|
||||
host = System.getenv().getOrDefault("HOSTNAME", "0");
|
||||
}
|
||||
}
|
||||
return Integer.toString(host.hashCode() & Integer.MAX_VALUE);
|
||||
}
|
||||
|
||||
private boolean isBlank(String value) {
|
||||
return value == null || value.trim().isEmpty();
|
||||
}
|
||||
}
|
||||
|
|
@ -1,30 +0,0 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.automq.log;

public class LogConfigConstants {
    public static final String LOG_S3_ENABLE_KEY = "log.s3.enable";

    public static final String LOG_S3_BUCKET_KEY = "log.s3.bucket";

    public static final String LOG_S3_CLUSTER_ID_KEY = "log.s3.cluster.id";

    public static final String LOG_S3_NODE_ID_KEY = "log.s3.node.id";
}

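These keys are read from the worker properties by ConnectS3LogConfigProvider above. A hedged sketch of the corresponding entries built programmatically; the bucket name, region, cluster id, and node id are placeholders.

import java.util.Properties;

public class LogUploaderConfigExample {
    public static Properties exampleLogUploaderProps() {
        Properties props = new Properties();
        props.setProperty("log.s3.enable", "true");
        // Placeholder bucket URI; the real value follows the 0@s3://$bucket?region=$region format.
        props.setProperty("log.s3.bucket", "0@s3://my-log-bucket?region=us-east-1");
        props.setProperty("log.s3.cluster.id", "connect-cluster-1");
        props.setProperty("log.s3.node.id", "1");
        return props;
    }
}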
@ -1,77 +0,0 @@
|
|||
package org.apache.kafka.connect.automq.metrics;
|
||||
|
||||
import org.apache.kafka.connect.automq.runtime.LeaderNodeSelector;
|
||||
import org.apache.kafka.connect.automq.runtime.RuntimeLeaderSelectorProvider;
|
||||
|
||||
import com.automq.opentelemetry.exporter.MetricsExportConfig;
|
||||
import com.automq.stream.s3.operator.BucketURI;
|
||||
import com.automq.stream.s3.operator.ObjectStorage;
|
||||
import com.automq.stream.s3.operator.ObjectStorageFactory;
|
||||
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public class ConnectMetricsExportConfig implements MetricsExportConfig {
|
||||
|
||||
private final BucketURI metricsBucket;
|
||||
private final String clusterId;
|
||||
private final int nodeId;
|
||||
private final int intervalMs;
|
||||
private final List<Pair<String, String>> baseLabels;
|
||||
private ObjectStorage objectStorage;
|
||||
private LeaderNodeSelector leaderNodeSelector;
|
||||
|
||||
|
||||
public ConnectMetricsExportConfig(String clusterId, int nodeId, BucketURI metricsBucket, List<Pair<String, String>> baseLabels, int intervalMs) {
|
||||
this.clusterId = clusterId;
|
||||
this.nodeId = nodeId;
|
||||
this.metricsBucket = metricsBucket;
|
||||
this.baseLabels = baseLabels;
|
||||
this.intervalMs = intervalMs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String clusterId() {
|
||||
return this.clusterId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isLeader() {
|
||||
LeaderNodeSelector selector = leaderSelector();
|
||||
return selector != null && selector.isLeader();
|
||||
}
|
||||
|
||||
public LeaderNodeSelector leaderSelector() {
|
||||
if (leaderNodeSelector == null) {
|
||||
this.leaderNodeSelector = new RuntimeLeaderSelectorProvider().createSelector();
|
||||
}
|
||||
return leaderNodeSelector;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int nodeId() {
|
||||
return this.nodeId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ObjectStorage objectStorage() {
|
||||
if (metricsBucket == null) {
|
||||
return null;
|
||||
}
|
||||
if (this.objectStorage == null) {
|
||||
this.objectStorage = ObjectStorageFactory.instance().builder(metricsBucket).threadPrefix("s3-metric").build();
|
||||
}
|
||||
return this.objectStorage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Pair<String, String>> baseLabels() {
|
||||
return this.baseLabels;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int intervalMs() {
|
||||
return this.intervalMs;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,30 +0,0 @@
package org.apache.kafka.connect.automq.metrics;

public class MetricsConfigConstants {
    public static final String SERVICE_NAME_KEY = "service.name";
    public static final String SERVICE_INSTANCE_ID_KEY = "service.instance.id";
    public static final String S3_CLIENT_ID_KEY = "automq.telemetry.s3.cluster.id";
    /**
     * The URI for configuring metrics exporters, e.g. prometheus://localhost:9090 or otlp://localhost:4317.
     */
    public static final String EXPORTER_URI_KEY = "automq.telemetry.exporter.uri";
    /**
     * The export interval in milliseconds.
     */
    public static final String EXPORTER_INTERVAL_MS_KEY = "automq.telemetry.exporter.interval.ms";
    /**
     * The cardinality limit for any single metric.
     */
    public static final String METRIC_CARDINALITY_LIMIT_KEY = "automq.telemetry.metric.cardinality.limit";
    public static final int DEFAULT_METRIC_CARDINALITY_LIMIT = 20000;

    public static final String TELEMETRY_METRICS_BASE_LABELS_CONFIG = "automq.telemetry.metrics.base.labels";
    public static final String TELEMETRY_METRICS_BASE_LABELS_DOC = "The base labels that will be added to all metrics. The format is key1=value1,key2=value2.";

    public static final String S3_BUCKET = "automq.telemetry.s3.bucket";
    public static final String S3_BUCKETS_DOC = "The bucket URI in the format 0@s3://$bucket?region=$region.\n" +
        "The full S3 URI format is 0@s3://$bucket?region=$region[&endpoint=$endpoint][&pathStyle=$enablePathStyle][&authType=$authType][&accessKey=$accessKey][&secretKey=$secretKey][&checksumAlgorithm=$checksumAlgorithm].\n" +
        "- pathStyle: true|false. The object storage access path style. When using MinIO, it should be set to true.\n" +
        "- authType: instance|static. When set to instance, the instance profile is used for authentication. When set to static, the accessKey and secretKey are taken from the URI or from the environment variables KAFKA_S3_ACCESS_KEY/KAFKA_S3_SECRET_KEY.";
}

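These keys feed OpenTelemetryMetricsReporter.initializeTelemetry in the reporter below. A minimal bootstrap sketch, assuming placeholder endpoint, labels, and service identifiers; the real values come from the worker configuration.

import org.apache.kafka.connect.automq.metrics.OpenTelemetryMetricsReporter;

import java.util.Properties;

public class TelemetryBootstrapExample {
    public static void main(String[] args) {
        Properties telemetryProps = new Properties();
        // Placeholder endpoint; prometheus:// and otlp:// URIs are both accepted per EXPORTER_URI_KEY docs.
        telemetryProps.setProperty("automq.telemetry.exporter.uri", "otlp://localhost:4317");
        telemetryProps.setProperty("automq.telemetry.exporter.interval.ms", "60000");
        telemetryProps.setProperty("automq.telemetry.metrics.base.labels", "env=dev,team=platform");
        telemetryProps.setProperty("service.name", "connect-demo");
        telemetryProps.setProperty("service.instance.id", "0");
        OpenTelemetryMetricsReporter.initializeTelemetry(telemetryProps);
    }
}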
@ -1,822 +0,0 @@
|
|||
/*
|
||||
* Copyright 2025, AutoMQ HK Limited.
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.automq.metrics;
|
||||
|
||||
import org.apache.kafka.common.MetricName;
|
||||
import org.apache.kafka.common.metrics.KafkaMetric;
|
||||
import org.apache.kafka.common.metrics.MetricsReporter;
|
||||
|
||||
import com.automq.opentelemetry.AutoMQTelemetryManager;
|
||||
import com.automq.stream.s3.operator.BucketURI;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import io.opentelemetry.api.common.Attributes;
|
||||
import io.opentelemetry.api.common.AttributesBuilder;
|
||||
import io.opentelemetry.api.metrics.Meter;
|
||||
import io.opentelemetry.api.metrics.ObservableDoubleCounter;
|
||||
import io.opentelemetry.api.metrics.ObservableDoubleGauge;
|
||||
import io.opentelemetry.api.metrics.ObservableLongCounter;
|
||||
|
||||
/**
|
||||
* A MetricsReporter implementation that bridges Kafka Connect metrics to OpenTelemetry.
|
||||
*
|
||||
* <p>This reporter integrates with the AutoMQ OpenTelemetry module to export Kafka Connect
|
||||
* metrics through various exporters (Prometheus, OTLP, etc.). It automatically converts
|
||||
* Kafka metrics to OpenTelemetry instruments based on metric types and provides proper
|
||||
* labeling and naming conventions.
|
||||
*
|
||||
* <p>Key features:
|
||||
* <ul>
|
||||
* <li>Automatic metric type detection and conversion</li>
|
||||
* <li>Support for gauges and counters using async observable instruments</li>
|
||||
* <li>Proper attribute mapping from Kafka metric tags</li>
|
||||
* <li>Integration with AutoMQ telemetry infrastructure</li>
|
||||
* <li>Configurable metric filtering</li>
|
||||
* <li>Real-time metric value updates through callbacks</li>
|
||||
* </ul>
|
||||
*
|
||||
* <p>Configuration options:
|
||||
* <ul>
|
||||
* <li>{@code opentelemetry.metrics.enabled} - Enable/disable OpenTelemetry metrics (default: true)</li>
|
||||
 * <li>{@code opentelemetry.metrics.prefix} - Prefix for metric names (default: "kafka")</li>
|
||||
* <li>{@code opentelemetry.metrics.include.pattern} - Regex pattern for included metrics</li>
|
||||
* <li>{@code opentelemetry.metrics.exclude.pattern} - Regex pattern for excluded metrics</li>
|
||||
* </ul>
|
||||
*/
|
||||
public class OpenTelemetryMetricsReporter implements MetricsReporter {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(OpenTelemetryMetricsReporter.class);
|
||||
|
||||
private static final String ENABLED_CONFIG = "opentelemetry.metrics.enabled";
|
||||
private static final String PREFIX_CONFIG = "opentelemetry.metrics.prefix";
|
||||
private static final String INCLUDE_PATTERN_CONFIG = "opentelemetry.metrics.include.pattern";
|
||||
private static final String EXCLUDE_PATTERN_CONFIG = "opentelemetry.metrics.exclude.pattern";
|
||||
|
||||
private static final String DEFAULT_PREFIX = "kafka";
|
||||
|
||||
private boolean enabled = true;
|
||||
private String metricPrefix = DEFAULT_PREFIX;
|
||||
private String includePattern = null;
|
||||
private String excludePattern = null;
|
||||
|
||||
private Meter meter;
|
||||
private final Map<String, AutoCloseable> observableHandles = new ConcurrentHashMap<>();
|
||||
private final Map<String, KafkaMetric> registeredMetrics = new ConcurrentHashMap<>();
|
||||
|
||||
public static void initializeTelemetry(Properties props) {
|
||||
String exportURIStr = props.getProperty(MetricsConfigConstants.EXPORTER_URI_KEY);
|
||||
String serviceName = props.getProperty(MetricsConfigConstants.SERVICE_NAME_KEY, "connect-default");
|
||||
String instanceId = props.getProperty(MetricsConfigConstants.SERVICE_INSTANCE_ID_KEY, "0");
|
||||
String clusterId = props.getProperty(MetricsConfigConstants.S3_CLIENT_ID_KEY, "cluster-default");
|
||||
int intervalMs = Integer.parseInt(props.getProperty(MetricsConfigConstants.EXPORTER_INTERVAL_MS_KEY, "60000"));
|
||||
BucketURI metricsBucket = getMetricsBucket(props);
|
||||
List<Pair<String, String>> baseLabels = getBaseLabels(props);
|
||||
|
||||
AutoMQTelemetryManager.initializeInstance(exportURIStr, serviceName, instanceId, new ConnectMetricsExportConfig(clusterId, Integer.parseInt(instanceId), metricsBucket, baseLabels, intervalMs));
|
||||
LOGGER.info("OpenTelemetryMetricsReporter initialized");
|
||||
}
|
||||
|
||||
private static BucketURI getMetricsBucket(Properties props) {
|
||||
String metricsBucket = props.getProperty(MetricsConfigConstants.S3_BUCKET, "");
|
||||
if (StringUtils.isNotBlank(metricsBucket)) {
|
||||
List<BucketURI> bucketList = BucketURI.parseBuckets(metricsBucket);
|
||||
if (!bucketList.isEmpty()) {
|
||||
return bucketList.get(0);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private static List<Pair<String, String>> getBaseLabels(Properties props) {
|
||||
        // Parse base labels from the "key1=value1,key2=value2" worker property; malformed entries are skipped.
|
||||
String baseLabels = props.getProperty(MetricsConfigConstants.TELEMETRY_METRICS_BASE_LABELS_CONFIG);
|
||||
if (StringUtils.isBlank(baseLabels)) {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
List<Pair<String, String>> labels = new ArrayList<>();
|
||||
for (String label : baseLabels.split(",")) {
|
||||
String[] kv = label.split("=");
|
||||
if (kv.length != 2) {
|
||||
continue;
|
||||
}
|
||||
labels.add(Pair.of(kv[0], kv[1]));
|
||||
}
|
||||
return labels;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs) {
|
||||
// Parse configuration
|
||||
Object enabledObj = configs.get(ENABLED_CONFIG);
|
||||
if (enabledObj != null) {
|
||||
enabled = Boolean.parseBoolean(enabledObj.toString());
|
||||
}
|
||||
|
||||
Object prefixObj = configs.get(PREFIX_CONFIG);
|
||||
if (prefixObj != null) {
|
||||
metricPrefix = prefixObj.toString();
|
||||
}
|
||||
|
||||
Object includeObj = configs.get(INCLUDE_PATTERN_CONFIG);
|
||||
if (includeObj != null) {
|
||||
includePattern = includeObj.toString();
|
||||
}
|
||||
|
||||
Object excludeObj = configs.get(EXCLUDE_PATTERN_CONFIG);
|
||||
if (excludeObj != null) {
|
||||
excludePattern = excludeObj.toString();
|
||||
}
|
||||
|
||||
LOGGER.info("OpenTelemetryMetricsReporter configured - enabled: {}, prefix: {}, include: {}, exclude: {}",
|
||||
enabled, metricPrefix, includePattern, excludePattern);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void init(List<KafkaMetric> metrics) {
|
||||
if (!enabled) {
|
||||
LOGGER.info("OpenTelemetryMetricsReporter is disabled");
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// Get the OpenTelemetry meter from AutoMQTelemetryManager
|
||||
// This assumes the telemetry manager is already initialized
|
||||
meter = AutoMQTelemetryManager.getInstance().getMeter();
|
||||
if (meter == null) {
|
||||
LOGGER.warn("AutoMQTelemetryManager is not initialized, OpenTelemetry metrics will not be available");
|
||||
enabled = false;
|
||||
return;
|
||||
}
|
||||
|
||||
// Register initial metrics
|
||||
for (KafkaMetric metric : metrics) {
|
||||
registerMetric(metric);
|
||||
}
|
||||
|
||||
LOGGER.info("OpenTelemetryMetricsReporter initialized with {} metrics", metrics.size());
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to initialize OpenTelemetryMetricsReporter", e);
|
||||
enabled = false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void metricChange(KafkaMetric metric) {
|
||||
if (!enabled || meter == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
registerMetric(metric);
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Failed to register metric change for {}", metric.metricName(), e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void metricRemoval(KafkaMetric metric) {
|
||||
if (!enabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
String metricKey = buildMetricKey(metric.metricName());
|
||||
closeHandle(metricKey);
|
||||
registeredMetrics.remove(metricKey);
|
||||
LOGGER.debug("Removed metric: {}", metricKey);
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Failed to remove metric {}", metric.metricName(), e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
if (enabled) {
|
||||
// Close all observable handles to prevent memory leaks
|
||||
observableHandles.values().forEach(handle -> {
|
||||
try {
|
||||
handle.close();
|
||||
} catch (Exception e) {
|
||||
LOGGER.debug("Error closing observable handle", e);
|
||||
}
|
||||
});
|
||||
observableHandles.clear();
|
||||
registeredMetrics.clear();
|
||||
}
|
||||
LOGGER.info("OpenTelemetryMetricsReporter closed");
|
||||
}
|
||||
|
||||
private void registerMetric(KafkaMetric metric) {
|
||||
LOGGER.debug("OpenTelemetryMetricsReporter registering metric {}", metric.metricName());
|
||||
MetricName metricName = metric.metricName();
|
||||
String metricKey = buildMetricKey(metricName);
|
||||
|
||||
// Apply filtering
|
||||
if (!shouldIncludeMetric(metricKey)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if metric value is numeric at registration time
|
||||
Object testValue = safeMetricValue(metric);
|
||||
if (!(testValue instanceof Number)) {
|
||||
LOGGER.debug("Skipping non-numeric metric: {}", metricKey);
|
||||
return;
|
||||
}
|
||||
|
||||
Attributes attributes = buildAttributes(metricName);
|
||||
|
||||
// Close existing handle if present (for metric updates)
|
||||
closeHandle(metricKey);
|
||||
|
||||
// Register the metric for future access
|
||||
registeredMetrics.put(metricKey, metric);
|
||||
|
||||
// Determine metric type and register accordingly
|
||||
if (isCounterMetric(metricName)) {
|
||||
registerAsyncCounter(metricKey, metricName, metric, attributes, (Number) testValue);
|
||||
} else {
|
||||
registerAsyncGauge(metricKey, metricName, metric, attributes);
|
||||
}
|
||||
}
|
||||
|
||||
private void registerAsyncGauge(String metricKey, MetricName metricName, KafkaMetric metric, Attributes attributes) {
|
||||
try {
|
||||
String description = buildDescription(metricName);
|
||||
String unit = determineUnit(metricName);
|
||||
|
||||
ObservableDoubleGauge gauge = meter.gaugeBuilder(metricKey)
|
||||
.setDescription(description)
|
||||
.setUnit(unit)
|
||||
.buildWithCallback(measurement -> {
|
||||
Number value = (Number) safeMetricValue(metric);
|
||||
if (value != null) {
|
||||
measurement.record(value.doubleValue(), attributes);
|
||||
}
|
||||
});
|
||||
|
||||
observableHandles.put(metricKey, gauge);
|
||||
LOGGER.debug("Registered async gauge: {}", metricKey);
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Failed to register async gauge for {}", metricKey, e);
|
||||
}
|
||||
}
|
||||
|
||||
private void registerAsyncCounter(String metricKey, MetricName metricName, KafkaMetric metric,
|
||||
Attributes attributes, Number initialValue) {
|
||||
try {
|
||||
String description = buildDescription(metricName);
|
||||
String unit = determineUnit(metricName);
|
||||
|
||||
// Use appropriate counter type based on initial value type
|
||||
if (initialValue instanceof Long || initialValue instanceof Integer) {
|
||||
ObservableLongCounter counter = meter.counterBuilder(metricKey)
|
||||
.setDescription(description)
|
||||
.setUnit(unit)
|
||||
.buildWithCallback(measurement -> {
|
||||
Number value = (Number) safeMetricValue(metric);
|
||||
if (value != null) {
|
||||
long longValue = value.longValue();
|
||||
if (longValue >= 0) {
|
||||
measurement.record(longValue, attributes);
|
||||
}
|
||||
}
|
||||
});
|
||||
observableHandles.put(metricKey, counter);
|
||||
} else {
|
||||
ObservableDoubleCounter counter = meter.counterBuilder(metricKey)
|
||||
.ofDoubles()
|
||||
.setDescription(description)
|
||||
.setUnit(unit)
|
||||
.buildWithCallback(measurement -> {
|
||||
Number value = (Number) safeMetricValue(metric);
|
||||
if (value != null) {
|
||||
double doubleValue = value.doubleValue();
|
||||
if (doubleValue >= 0) {
|
||||
measurement.record(doubleValue, attributes);
|
||||
}
|
||||
}
|
||||
});
|
||||
observableHandles.put(metricKey, counter);
|
||||
}
|
||||
|
||||
LOGGER.debug("Registered async counter: {}", metricKey);
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Failed to register async counter for {}", metricKey, e);
|
||||
}
|
||||
}
|
||||
|
||||
private Object safeMetricValue(KafkaMetric metric) {
|
||||
try {
|
||||
return metric.metricValue();
|
||||
} catch (Exception e) {
|
||||
LOGGER.debug("Failed to read metric value for {}", metric.metricName(), e);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private void closeHandle(String metricKey) {
|
||||
AutoCloseable handle = observableHandles.remove(metricKey);
|
||||
if (handle != null) {
|
||||
try {
|
||||
handle.close();
|
||||
} catch (Exception e) {
|
||||
LOGGER.debug("Error closing handle for {}", metricKey, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private String buildMetricKey(MetricName metricName) {
|
||||
StringBuilder sb = new StringBuilder(metricPrefix);
|
||||
sb.append(".");
|
||||
|
||||
// Add group if present
|
||||
if (metricName.group() != null && !metricName.group().isEmpty()) {
|
||||
sb.append(metricName.group().replace("-", "_").toLowerCase(Locale.ROOT));
|
||||
sb.append(".");
|
||||
}
|
||||
|
||||
// Add name
|
||||
sb.append(metricName.name().replace("-", "_").toLowerCase(Locale.ROOT));
|
||||
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
private Attributes buildAttributes(MetricName metricName) {
|
||||
AttributesBuilder builder = Attributes.builder();
|
||||
|
||||
// Add metric tags as attributes
|
||||
Map<String, String> tags = metricName.tags();
|
||||
if (tags != null) {
|
||||
for (Map.Entry<String, String> entry : tags.entrySet()) {
|
||||
String key = entry.getKey();
|
||||
String value = entry.getValue();
|
||||
if (key != null && value != null) {
|
||||
builder.put(sanitizeAttributeKey(key), value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add standard attributes
|
||||
if (metricName.group() != null) {
|
||||
builder.put("metric.group", metricName.group());
|
||||
}
|
||||
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
private String sanitizeAttributeKey(String key) {
|
||||
return key.replace("-", "_").replace(".", "_").toLowerCase(Locale.ROOT);
|
||||
}
|
||||
|
||||
private String buildDescription(MetricName metricName) {
|
||||
StringBuilder description = new StringBuilder();
|
||||
description.append("Kafka Connect metric: ");
|
||||
|
||||
if (metricName.group() != null) {
|
||||
description.append(metricName.group()).append(" - ");
|
||||
}
|
||||
|
||||
description.append(metricName.name());
|
||||
|
||||
return description.toString();
|
||||
}
|
||||
|
||||
private String determineUnit(MetricName metricName) {
|
||||
String name = metricName.name().toLowerCase(Locale.ROOT);
|
||||
String group = metricName.group() != null ? metricName.group().toLowerCase(Locale.ROOT) : "";
|
||||
|
||||
if (isKafkaConnectMetric(group)) {
|
||||
return determineConnectMetricUnit(name);
|
||||
}
|
||||
|
||||
if (isTimeMetric(name)) {
|
||||
return determineTimeUnit(name);
|
||||
}
|
||||
|
||||
if (isBytesMetric(name)) {
|
||||
return determineBytesUnit(name);
|
||||
}
|
||||
|
||||
if (isRateMetric(name)) {
|
||||
return "1/s";
|
||||
}
|
||||
|
||||
if (isRatioOrPercentageMetric(name)) {
|
||||
return "1";
|
||||
}
|
||||
|
||||
if (isCountMetric(name)) {
|
||||
return "1";
|
||||
}
|
||||
|
||||
return "1";
|
||||
}
|
||||
|
||||
private boolean isCounterMetric(MetricName metricName) {
|
||||
String name = metricName.name().toLowerCase(Locale.ROOT);
|
||||
String group = metricName.group() != null ? metricName.group().toLowerCase(Locale.ROOT) : "";
|
||||
|
||||
if (isKafkaConnectMetric(group)) {
|
||||
return isConnectCounterMetric(name);
|
||||
}
|
||||
|
||||
if (isGaugeMetric(name)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return hasCounterKeywords(name);
|
||||
}
|
||||
|
||||
private boolean isGaugeMetric(String name) {
|
||||
return hasRateOrAvgKeywords(name) || hasRatioOrPercentKeywords(name) ||
|
||||
hasMinMaxOrCurrentKeywords(name) || hasActiveOrSizeKeywords(name) ||
|
||||
hasTimeButNotTotal(name);
|
||||
}
|
||||
|
||||
private boolean hasRateOrAvgKeywords(String name) {
|
||||
return name.contains("rate") || name.contains("avg") || name.contains("mean");
|
||||
}
|
||||
|
||||
private boolean hasRatioOrPercentKeywords(String name) {
|
||||
return name.contains("ratio") || name.contains("percent") || name.contains("pct");
|
||||
}
|
||||
|
||||
private boolean hasMinMaxOrCurrentKeywords(String name) {
|
||||
return name.contains("max") || name.contains("min") || name.contains("current");
|
||||
}
|
||||
|
||||
private boolean hasActiveOrSizeKeywords(String name) {
|
||||
return name.contains("active") || name.contains("lag") || name.contains("size");
|
||||
}
|
||||
|
||||
private boolean hasTimeButNotTotal(String name) {
|
||||
return name.contains("time") && !name.contains("total");
|
||||
}
|
||||
|
||||
private boolean hasCounterKeywords(String name) {
|
||||
String[] parts = name.split("[._-]");
|
||||
for (String part : parts) {
|
||||
if (isCounterKeyword(part)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private boolean isCounterKeyword(String part) {
|
||||
return isBasicCounterKeyword(part) || isAdvancedCounterKeyword(part);
|
||||
}
|
||||
|
||||
private boolean isBasicCounterKeyword(String part) {
|
||||
return "total".equals(part) || "count".equals(part) || "sum".equals(part) ||
|
||||
"attempts".equals(part);
|
||||
}
|
||||
|
||||
private boolean isAdvancedCounterKeyword(String part) {
|
||||
return "success".equals(part) || "failure".equals(part) ||
|
||||
"errors".equals(part) || "retries".equals(part) || "skipped".equals(part);
|
||||
}
|
||||
|
||||
private boolean isConnectCounterMetric(String name) {
|
||||
if (hasTotalBasedCounters(name)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (hasRecordCounters(name)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (hasActiveCountMetrics(name)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
private boolean hasTotalBasedCounters(String name) {
|
||||
return hasBasicTotalCounters(name) || hasSuccessFailureCounters(name) ||
|
||||
hasErrorRetryCounters(name) || hasRequestCompletionCounters(name);
|
||||
}
|
||||
|
||||
private boolean hasBasicTotalCounters(String name) {
|
||||
return name.contains("total") || name.contains("attempts");
|
||||
}
|
||||
|
||||
private boolean hasSuccessFailureCounters(String name) {
|
||||
return (name.contains("success") && name.contains("total")) ||
|
||||
(name.contains("failure") && name.contains("total"));
|
||||
}
|
||||
|
||||
private boolean hasErrorRetryCounters(String name) {
|
||||
return name.contains("errors") || name.contains("retries") || name.contains("skipped");
|
||||
}
|
||||
|
||||
private boolean hasRequestCompletionCounters(String name) {
|
||||
return name.contains("requests") || name.contains("completions");
|
||||
}
|
||||
|
||||
private boolean hasRecordCounters(String name) {
|
||||
return hasRecordKeyword(name) && hasTotalOperation(name);
|
||||
}
|
||||
|
||||
private boolean hasRecordKeyword(String name) {
|
||||
return name.contains("record") || name.contains("records");
|
||||
}
|
||||
|
||||
private boolean hasTotalOperation(String name) {
|
||||
return hasPollWriteTotal(name) || hasReadSendTotal(name);
|
||||
}
|
||||
|
||||
private boolean hasPollWriteTotal(String name) {
|
||||
return name.contains("poll-total") || name.contains("write-total");
|
||||
}
|
||||
|
||||
private boolean hasReadSendTotal(String name) {
|
||||
return name.contains("read-total") || name.contains("send-total");
|
||||
}
|
||||
|
||||
private boolean hasActiveCountMetrics(String name) {
|
||||
return hasCountMetrics(name) || hasSequenceMetrics(name);
|
||||
}
|
||||
|
||||
private boolean hasCountMetrics(String name) {
|
||||
return hasActiveTaskCount(name) || hasConnectorCount(name) || hasStatusCount(name);
|
||||
}
|
||||
|
||||
private boolean hasActiveTaskCount(String name) {
|
||||
return name.contains("active-count") || name.contains("partition-count") ||
|
||||
name.contains("task-count");
|
||||
}
|
||||
|
||||
private boolean hasConnectorCount(String name) {
|
||||
return name.contains("connector-count") || name.contains("running-count");
|
||||
}
|
||||
|
||||
private boolean hasStatusCount(String name) {
|
||||
return name.contains("paused-count") || name.contains("failed-count");
|
||||
}
|
||||
|
||||
private boolean hasSequenceMetrics(String name) {
|
||||
return name.contains("seq-no") || name.contains("seq-num");
|
||||
}
|
||||
|
||||
private boolean isKafkaConnectMetric(String group) {
|
||||
return group.contains("connector") || group.contains("task") ||
|
||||
group.contains("connect") || group.contains("worker");
|
||||
}
|
||||
|
||||
private String determineConnectMetricUnit(String name) {
|
||||
String timeUnit = getTimeUnit(name);
|
||||
if (timeUnit != null) {
|
||||
return timeUnit;
|
||||
}
|
||||
|
||||
String countUnit = getCountUnit(name);
|
||||
if (countUnit != null) {
|
||||
return countUnit;
|
||||
}
|
||||
|
||||
String specialUnit = getSpecialUnit(name);
|
||||
if (specialUnit != null) {
|
||||
return specialUnit;
|
||||
}
|
||||
|
||||
return "1";
|
||||
}
|
||||
|
||||
private String getTimeUnit(String name) {
|
||||
if (isTimeBasedMetric(name)) {
|
||||
return "ms";
|
||||
}
|
||||
if (isTimestampMetric(name)) {
|
||||
return "ms";
|
||||
}
|
||||
if (isTimeSinceMetric(name)) {
|
||||
return "ms";
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private String getCountUnit(String name) {
|
||||
if (isSequenceOrCountMetric(name)) {
|
||||
return "1";
|
||||
}
|
||||
if (isLagMetric(name)) {
|
||||
return "1";
|
||||
}
|
||||
if (isTotalOrCounterMetric(name)) {
|
||||
return "1";
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private String getSpecialUnit(String name) {
|
||||
if (isStatusOrMetadataMetric(name)) {
|
||||
return "1";
|
||||
}
|
||||
if (isConnectRateMetric(name)) {
|
||||
return "1/s";
|
||||
}
|
||||
if (isRatioMetric(name)) {
|
||||
return "1";
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private boolean isTimeBasedMetric(String name) {
|
||||
return hasTimeMs(name) || hasCommitBatchTime(name);
|
||||
}
|
||||
|
||||
private boolean hasTimeMs(String name) {
|
||||
return name.endsWith("-time-ms") || name.endsWith("-avg-time-ms") ||
|
||||
name.endsWith("-max-time-ms");
|
||||
}
|
||||
|
||||
private boolean hasCommitBatchTime(String name) {
|
||||
return name.contains("commit-time") || name.contains("batch-time") ||
|
||||
name.contains("rebalance-time");
|
||||
}
|
||||
|
||||
private boolean isSequenceOrCountMetric(String name) {
|
||||
return hasSequenceNumbers(name) || hasCountSuffix(name);
|
||||
}
|
||||
|
||||
private boolean hasSequenceNumbers(String name) {
|
||||
return name.contains("seq-no") || name.contains("seq-num");
|
||||
}
|
||||
|
||||
private boolean hasCountSuffix(String name) {
|
||||
return name.endsWith("-count") || name.contains("task-count") ||
|
||||
name.contains("partition-count");
|
||||
}
|
||||
|
||||
private boolean isLagMetric(String name) {
|
||||
return name.contains("lag");
|
||||
}
|
||||
|
||||
private boolean isStatusOrMetadataMetric(String name) {
|
||||
return isStatusMetric(name) || hasProtocolLeaderMetrics(name) ||
|
||||
hasConnectorMetrics(name);
|
||||
}
|
||||
|
||||
private boolean isStatusMetric(String name) {
|
||||
return "status".equals(name) || name.contains("protocol");
|
||||
}
|
||||
|
||||
private boolean hasProtocolLeaderMetrics(String name) {
|
||||
return name.contains("leader-name");
|
||||
}
|
||||
|
||||
private boolean hasConnectorMetrics(String name) {
|
||||
return name.contains("connector-type") || name.contains("connector-class") ||
|
||||
name.contains("connector-version");
|
||||
}
|
||||
|
||||
private boolean isRatioMetric(String name) {
|
||||
return name.contains("ratio") || name.contains("percentage");
|
||||
}
|
||||
|
||||
private boolean isTotalOrCounterMetric(String name) {
|
||||
return hasTotalSum(name) || hasAttempts(name) || hasSuccessFailure(name) ||
|
||||
hasErrorsRetries(name);
|
||||
}
|
||||
|
||||
private boolean hasTotalSum(String name) {
|
||||
return name.contains("total") || name.contains("sum");
|
||||
}
|
||||
|
||||
private boolean hasAttempts(String name) {
|
||||
return name.contains("attempts");
|
||||
}
|
||||
|
||||
private boolean hasSuccessFailure(String name) {
|
||||
return name.contains("success") || name.contains("failure");
|
||||
}
|
||||
|
||||
private boolean hasErrorsRetries(String name) {
|
||||
return name.contains("errors") || name.contains("retries") || name.contains("skipped");
|
||||
}
|
||||
|
||||
private boolean isTimestampMetric(String name) {
|
||||
return name.contains("timestamp") || name.contains("epoch");
|
||||
}
|
||||
|
||||
private boolean isConnectRateMetric(String name) {
|
||||
return name.contains("rate") && !name.contains("ratio");
|
||||
}
|
||||
|
||||
private boolean isTimeSinceMetric(String name) {
|
||||
return name.contains("time-since-last") || name.contains("since-last");
|
||||
}
|
||||
|
||||
private boolean isTimeMetric(String name) {
|
||||
return hasTimeKeywords(name) && !hasTimeExclusions(name);
|
||||
}
|
||||
|
||||
private boolean hasTimeKeywords(String name) {
|
||||
return name.contains("time") || name.contains("latency") ||
|
||||
name.contains("duration");
|
||||
}
|
||||
|
||||
private boolean hasTimeExclusions(String name) {
|
||||
return name.contains("ratio") || name.contains("rate") ||
|
||||
name.contains("count") || name.contains("since-last");
|
||||
}
|
||||
|
||||
private String determineTimeUnit(String name) {
|
||||
if (name.contains("ms") || name.contains("millisecond")) {
|
||||
return "ms";
|
||||
} else if (name.contains("us") || name.contains("microsecond")) {
|
||||
return "us";
|
||||
} else if (name.contains("ns") || name.contains("nanosecond")) {
|
||||
return "ns";
|
||||
} else if (name.contains("s") && !name.contains("ms")) {
|
||||
return "s";
|
||||
} else {
|
||||
return "ms";
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isBytesMetric(String name) {
|
||||
return name.contains("byte") || name.contains("bytes") ||
|
||||
name.contains("size") && !name.contains("batch-size");
|
||||
}
|
||||
|
||||
private String determineBytesUnit(String name) {
|
||||
boolean isRate = name.contains("rate") || name.contains("per-sec") ||
|
||||
name.contains("persec") || name.contains("/s");
|
||||
return isRate ? "By/s" : "By";
|
||||
}
|
||||
|
||||
private boolean isRateMetric(String name) {
|
||||
return hasRateKeywords(name) && !hasExcludedKeywords(name);
|
||||
}
|
||||
|
||||
private boolean hasRateKeywords(String name) {
|
||||
return name.contains("rate") || name.contains("per-sec") ||
|
||||
name.contains("persec") || name.contains("/s");
|
||||
}
|
||||
|
||||
private boolean hasExcludedKeywords(String name) {
|
||||
return name.contains("byte") || name.contains("ratio");
|
||||
}
|
||||
|
||||
private boolean isRatioOrPercentageMetric(String name) {
|
||||
return hasPercentKeywords(name) || hasRatioKeywords(name);
|
||||
}
|
||||
|
||||
private boolean hasPercentKeywords(String name) {
|
||||
return name.contains("percent") || name.contains("pct");
|
||||
}
|
||||
|
||||
private boolean hasRatioKeywords(String name) {
|
||||
return name.contains("ratio");
|
||||
}
|
||||
|
||||
private boolean isCountMetric(String name) {
|
||||
return name.contains("count") || name.contains("total") ||
|
||||
name.contains("sum") || name.endsWith("-num");
|
||||
}
|
||||
|
||||
private boolean shouldIncludeMetric(String metricKey) {
|
||||
if (excludePattern != null && metricKey.matches(excludePattern)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (includePattern != null) {
|
||||
return metricKey.matches(includePattern);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
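The reporter above is a standard Kafka MetricsReporter, so a worker would normally opt in through the metric.reporters worker property together with the reporter-specific options listed in its Javadoc. A hedged sketch of wiring it up programmatically; the config values are placeholders, and in a real worker init() is driven by the Kafka metrics registry after initializeTelemetry has run.

import org.apache.kafka.connect.automq.metrics.OpenTelemetryMetricsReporter;

import java.util.Collections;
import java.util.Map;

public class ReporterConfigExample {
    public static void main(String[] args) {
        OpenTelemetryMetricsReporter reporter = new OpenTelemetryMetricsReporter();
        // Same keys the reporter reads in configure(); values here are placeholders.
        reporter.configure(Map.of(
            "opentelemetry.metrics.enabled", "true",
            "opentelemetry.metrics.prefix", "kafka",
            "opentelemetry.metrics.exclude.pattern", ".*rebalance.*"));
        // Normally the Kafka metrics registry passes the current metrics here.
        reporter.init(Collections.emptyList());
        reporter.close();
    }
}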
@ -1,34 +0,0 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.automq.runtime;

/**
 * An interface for determining which node should take responsibility for cleaning up metrics data.
 * This abstraction allows different leader-selection strategies to be plugged in.
 */
public interface LeaderNodeSelector {

    /**
     * Determines whether the current node is the leader for metrics cleanup.
     *
     * @return true if the current node should clean up metrics, false otherwise.
     */
    boolean isLeader();
}

@ -1,36 +0,0 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.automq.runtime;

/**
 * SPI interface for providing custom LeaderNodeSelector implementations.
 * Third-party libraries can implement this interface and register their implementations
 * using Java's ServiceLoader mechanism.
 */
public interface LeaderNodeSelectorProvider {

    /**
     * Creates a new LeaderNodeSelector instance.
     *
     * @return a new LeaderNodeSelector instance
     * @throws Exception if the selector cannot be created
     */
    LeaderNodeSelector createSelector() throws Exception;
}

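As with the AZ provider SPI, a custom implementation would be advertised through a META-INF/services/org.apache.kafka.connect.automq.runtime.LeaderNodeSelectorProvider file. A minimal sketch of a static selector follows; the class name and the system property are assumptions for illustration.

package org.apache.kafka.connect.automq.runtime;

// Hypothetical provider that treats the node as leader when a JVM system property is set.
public class StaticLeaderNodeSelectorProvider implements LeaderNodeSelectorProvider {
    @Override
    public LeaderNodeSelector createSelector() {
        boolean leader = Boolean.getBoolean("connect.node.is.leader"); // assumed property name
        return () -> leader;
    }
}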
@ -1,46 +0,0 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.automq.runtime;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.function.BooleanSupplier;

/**
 * Stores runtime-provided suppliers that answer whether the current process
 * should act as the leader.
 */
public final class RuntimeLeaderRegistry {
    private static final Logger LOGGER = LoggerFactory.getLogger(RuntimeLeaderRegistry.class);
    private static BooleanSupplier supplier = () -> false;

    private RuntimeLeaderRegistry() {
    }

    public static void register(BooleanSupplier supplier) {
        RuntimeLeaderRegistry.supplier = supplier;
        LOGGER.info("Registered runtime leader supplier for log/metrics upload.");
    }

    public static BooleanSupplier supplier() {
        return supplier;
    }
}

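The registry is what the runtime-backed selector below polls; the Connect runtime is expected to register a supplier once it knows whether this worker leads the group. A hedged usage sketch, assuming the leadership flag is flipped elsewhere (for example by a rebalance listener); the class and flag names are illustrative.

import org.apache.kafka.connect.automq.runtime.RuntimeLeaderRegistry;

import java.util.concurrent.atomic.AtomicBoolean;

public class LeaderRegistrationExample {
    public static void main(String[] args) {
        // In the real worker this flag would be maintained by the distributed herder's rebalance handling.
        AtomicBoolean isGroupLeader = new AtomicBoolean(false);
        RuntimeLeaderRegistry.register(isGroupLeader::get);

        // Later, once this node wins leadership of the Connect group:
        isGroupLeader.set(true);
    }
}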
@ -1,74 +0,0 @@
/*
 * Copyright 2025, AutoMQ HK Limited.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.kafka.connect.automq.runtime;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BooleanSupplier;

public class RuntimeLeaderSelectorProvider implements LeaderNodeSelectorProvider {
    private static final Logger LOGGER = LoggerFactory.getLogger(RuntimeLeaderSelectorProvider.class);

    @Override
    public LeaderNodeSelector createSelector() {
        final AtomicBoolean missingLogged = new AtomicBoolean(false);
        final AtomicBoolean leaderLogged = new AtomicBoolean(false);

        return () -> {
            BooleanSupplier current = RuntimeLeaderRegistry.supplier();
            if (current == null) {
                if (missingLogged.compareAndSet(false, true)) {
                    LOGGER.warn("Leader supplier is not yet available; treating this node as a follower until registration happens.");
                }
                if (leaderLogged.getAndSet(false)) {
                    LOGGER.info("Node stepped down from leadership because the supplier is unavailable.");
                }
                return false;
            }

            if (missingLogged.get()) {
                missingLogged.set(false);
                LOGGER.info("Leader supplier is now available.");
            }

            try {
                boolean leader = current.getAsBoolean();
                if (leader) {
                    if (!leaderLogged.getAndSet(true)) {
                        LOGGER.info("Node became leader");
                    }
                } else {
                    if (leaderLogged.getAndSet(false)) {
                        LOGGER.info("Node stepped down from leadership");
                    }
                }
                return leader;
            } catch (RuntimeException e) {
                if (leaderLogged.getAndSet(false)) {
                    LOGGER.info("Node stepped down from leadership due to a supplier exception.");
                }
                LOGGER.warn("Leader supplier threw an exception; treating this node as a follower.", e);
                return false;
            }
        };
    }
}

@ -19,9 +19,6 @@ package org.apache.kafka.connect.cli;
import org.apache.kafka.common.utils.Exit;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.automq.az.AzMetadataProviderHolder;
import org.apache.kafka.connect.automq.log.ConnectLogUploader;
import org.apache.kafka.connect.automq.metrics.OpenTelemetryMetricsReporter;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
import org.apache.kafka.connect.runtime.Connect;
import org.apache.kafka.connect.runtime.Herder;

@ -39,7 +36,6 @@ import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

/**
 * Common initialization logic for Kafka Connect, intended for use by command line utilities

@ -49,9 +45,7 @@ import java.util.Properties;
 */
public abstract class AbstractConnectCli<H extends Herder, T extends WorkerConfig> {

    private static Logger getLogger() {
        return LoggerFactory.getLogger(AbstractConnectCli.class);
    }
    private static final Logger log = LoggerFactory.getLogger(AbstractConnectCli.class);
    private final String[] args;
    private final Time time = Time.SYSTEM;

@ -89,6 +83,7 @@ public abstract class AbstractConnectCli<H extends Herder, T extends WorkerConfi
     */
    public void run() {
        if (args.length < 1 || Arrays.asList(args).contains("--help")) {
            log.info("Usage: {}", usage());
            Exit.exit(1);
        }

@ -97,17 +92,6 @@ public abstract class AbstractConnectCli<H extends Herder, T extends WorkerConfi
        Map<String, String> workerProps = !workerPropsFile.isEmpty() ?
                Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.emptyMap();
        String[] extraArgs = Arrays.copyOfRange(args, 1, args.length);

        // AutoMQ inject start
        // Initialize S3 log uploader and OpenTelemetry with worker properties
        ConnectLogUploader.initialize(workerProps);
        AzMetadataProviderHolder.initialize(workerProps);

        Properties telemetryProps = new Properties();
        telemetryProps.putAll(workerProps);
        OpenTelemetryMetricsReporter.initializeTelemetry(telemetryProps);
        // AutoMQ inject end

        Connect<H> connect = startConnect(workerProps);
        processExtraArgs(connect, extraArgs);

@ -115,7 +99,7 @@ public abstract class AbstractConnectCli<H extends Herder, T extends WorkerConfi
            connect.awaitStop();

        } catch (Throwable t) {
            getLogger().error("Stopping due to error", t);
            log.error("Stopping due to error", t);
            Exit.exit(2);
        }
    }

@ -127,17 +111,17 @@ public abstract class AbstractConnectCli<H extends Herder, T extends WorkerConfi
     * @return a started instance of {@link Connect}
     */
    public Connect<H> startConnect(Map<String, String> workerProps) {
        getLogger().info("Kafka Connect worker initializing ...");
        log.info("Kafka Connect worker initializing ...");
        long initStart = time.hiResClockMs();

        WorkerInfo initInfo = new WorkerInfo();
        initInfo.logAll();

        getLogger().info("Scanning for plugin classes. This might take a moment ...");
        log.info("Scanning for plugin classes. This might take a moment ...");
        Plugins plugins = new Plugins(workerProps);
        plugins.compareAndSwapWithDelegatingLoader();
        T config = createConfig(workerProps);
        getLogger().debug("Kafka cluster ID: {}", config.kafkaClusterId());
        log.debug("Kafka cluster ID: {}", config.kafkaClusterId());

        RestClient restClient = new RestClient(config);

@ -154,11 +138,11 @@ public abstract class AbstractConnectCli<H extends Herder, T extends WorkerConfi
        H herder = createHerder(config, workerId, plugins, connectorClientConfigOverridePolicy, restServer, restClient);

        final Connect<H> connect = new Connect<>(herder, restServer);
        getLogger().info("Kafka Connect worker initialization took {}ms", time.hiResClockMs() - initStart);
        log.info("Kafka Connect worker initialization took {}ms", time.hiResClockMs() - initStart);
        try {
            connect.start();
        } catch (Exception e) {
            getLogger().error("Failed to start Connect", e);
            log.error("Failed to start Connect", e);
            connect.stop();
            Exit.exit(3);
        }

@ -17,7 +17,6 @@
package org.apache.kafka.connect.cli;

import org.apache.kafka.common.utils.Time;
import org.apache.kafka.connect.automq.runtime.RuntimeLeaderRegistry;
import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy;
import org.apache.kafka.connect.json.JsonConverter;
import org.apache.kafka.connect.json.JsonConverterConfig;

@ -40,7 +39,6 @@ import org.apache.kafka.connect.util.SharedTopicAdmin;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.BooleanSupplier;

import static org.apache.kafka.clients.CommonClientConfigs.CLIENT_ID_CONFIG;

@ -98,16 +96,10 @@ public class ConnectDistributed extends AbstractConnectCli<DistributedHerder, Di

        // Pass the shared admin to the distributed herder as an additional AutoCloseable object that should be closed when the
        // herder is stopped. This is easier than having to track and own the lifecycle ourselves.
        DistributedHerder herder = new DistributedHerder(config, Time.SYSTEM, worker,
        return new DistributedHerder(config, Time.SYSTEM, worker,
                kafkaClusterId, statusBackingStore, configBackingStore,
                restServer.advertisedUrl().toString(), restClient, connectorClientConfigOverridePolicy,
                Collections.emptyList(), sharedAdmin);
        // AutoMQ for Kafka connect inject start
        BooleanSupplier leaderSupplier = herder::isLeaderInstance;
        RuntimeLeaderRegistry.register(leaderSupplier);
        // AutoMQ for Kafka connect inject end

        return herder;
    }

    @Override

@ -21,8 +21,6 @@ import org.apache.kafka.connect.runtime.distributed.DistributedHerder;
import org.apache.kafka.connect.runtime.rest.ConnectRestServer;
import org.apache.kafka.connect.runtime.rest.RestServer;

import com.automq.log.S3RollingFileAppender;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -117,9 +115,6 @@ public class Connect<H extends Herder> {
                try {
                    startLatch.await();
                    Connect.this.stop();
                    // AutoMQ inject start
                    S3RollingFileAppender.shutdown();
                    // AutoMQ inject end
                } catch (InterruptedException e) {
                    log.error("Interrupted in shutdown hook while waiting for Kafka Connect startup to finish");
                }

@ -48,7 +48,6 @@ import org.apache.kafka.common.utils.ThreadUtils;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.automq.az.AzAwareClientConfigurator;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.connector.Connector;
import org.apache.kafka.connect.connector.Task;

@ -842,10 +841,6 @@ public class Worker {
                connectorClientConfigOverridePolicy);
        producerProps.putAll(producerOverrides);

        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyProducerAz(producerProps, defaultClientId);
        // AutoMQ for Kafka inject end

        return producerProps;
    }

@ -914,10 +909,6 @@ public class Worker {
                connectorClientConfigOverridePolicy);
        consumerProps.putAll(consumerOverrides);

        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyConsumerAz(consumerProps, defaultClientId);
        // AutoMQ for Kafka inject end

        return consumerProps;
    }

@ -947,10 +938,6 @@ public class Worker {
        // Admin client-specific overrides in the worker config
        adminProps.putAll(config.originalsWithPrefix("admin."));

        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyAdminAz(adminProps, defaultClientId);
        // AutoMQ for Kafka inject end

        // Connector-specified overrides
        Map<String, Object> adminOverrides =
                connectorClientConfigOverrides(connName, connConfig, connectorClass, ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX,

@ -1735,12 +1735,6 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
        configBackingStore.putLoggerLevel(namespace, level);
    }

    // AutoMQ inject start
    public boolean isLeaderInstance() {
        return isLeader();
    }
    // AutoMQ inject end

    // Should only be called from work thread, so synchronization should not be needed
    protected boolean isLeader() {
        return assignment != null && member.memberId().equals(assignment.leader());

@ -35,7 +35,6 @@ import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.automq.az.AzAwareClientConfigurator;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.SchemaBuilder;

@ -441,9 +440,6 @@ public class KafkaConfigBackingStore extends KafkaTopicBasedBackingStore impleme
        Map<String, Object> result = new HashMap<>(baseProducerProps(workerConfig));

        result.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-leader");
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyProducerAz(result, "config-log-leader");
        // AutoMQ for Kafka inject end
        // Always require producer acks to all to ensure durable writes
        result.put(ProducerConfig.ACKS_CONFIG, "all");
        // We can set this to 5 instead of 1 without risking reordering because we are using an idempotent producer

@ -777,17 +773,11 @@ public class KafkaConfigBackingStore extends KafkaTopicBasedBackingStore impleme

        Map<String, Object> producerProps = new HashMap<>(baseProducerProps);
        producerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyProducerAz(producerProps, "config-log");
        // AutoMQ for Kafka inject end

        Map<String, Object> consumerProps = new HashMap<>(originals);
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyConsumerAz(consumerProps, "config-log");
        // AutoMQ for Kafka inject end
        ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
        if (config.exactlyOnceSourceEnabled()) {
            ConnectUtils.ensureProperty(

@ -800,9 +790,6 @@ public class KafkaConfigBackingStore extends KafkaTopicBasedBackingStore impleme
        Map<String, Object> adminProps = new HashMap<>(originals);
        ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
        adminProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyAdminAz(adminProps, "config-log");
        // AutoMQ for Kafka inject end

        Map<String, Object> topicSettings = config instanceof DistributedConfig
                ? ((DistributedConfig) config).configStorageTopicSettings()

@ -30,7 +30,6 @@ import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.connect.automq.az.AzAwareClientConfigurator;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.runtime.WorkerConfig;
import org.apache.kafka.connect.runtime.distributed.DistributedConfig;

@ -193,18 +192,12 @@ public class KafkaOffsetBackingStore extends KafkaTopicBasedBackingStore impleme
        // gets approved and scheduled for release.
        producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");
        producerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyProducerAz(producerProps, "offset-log");
        // AutoMQ for Kafka inject end
        ConnectUtils.addMetricsContextProperties(producerProps, config, clusterId);

        Map<String, Object> consumerProps = new HashMap<>(originals);
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyConsumerAz(consumerProps, "offset-log");
        // AutoMQ for Kafka inject end
        ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
        if (config.exactlyOnceSourceEnabled()) {
            ConnectUtils.ensureProperty(

@ -216,9 +209,6 @@ public class KafkaOffsetBackingStore extends KafkaTopicBasedBackingStore impleme

        Map<String, Object> adminProps = new HashMap<>(originals);
        adminProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyAdminAz(adminProps, "offset-log");
        // AutoMQ for Kafka inject end
        ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
        NewTopic topicDescription = newTopicDescription(topic, config);

@ -30,7 +30,6 @@ import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.ThreadUtils;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.connect.automq.az.AzAwareClientConfigurator;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.SchemaBuilder;

@ -184,25 +183,16 @@ public class KafkaStatusBackingStore extends KafkaTopicBasedBackingStore impleme
        // gets approved and scheduled for release.
        producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); // disable idempotence since retries is force to 0
        producerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyProducerAz(producerProps, "status-log");
        // AutoMQ for Kafka inject end
        ConnectUtils.addMetricsContextProperties(producerProps, config, clusterId);

        Map<String, Object> consumerProps = new HashMap<>(originals);
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyConsumerAz(consumerProps, "status-log");
        // AutoMQ for Kafka inject end
        ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);

        Map<String, Object> adminProps = new HashMap<>(originals);
        adminProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
        // AutoMQ for Kafka inject start
        AzAwareClientConfigurator.maybeApplyAdminAz(adminProps, "status-log");
        // AutoMQ for Kafka inject end
        ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);

        Map<String, Object> topicSettings = config instanceof DistributedConfig

@ -1,115 +0,0 @@
package org.apache.kafka.connect.automq;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.connect.automq.az.AzAwareClientConfigurator;
import org.apache.kafka.connect.automq.az.AzMetadataProvider;
import org.apache.kafka.connect.automq.az.AzMetadataProviderHolder;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;

class AzAwareClientConfiguratorTest {

    @AfterEach
    void resetProvider() {
        AzMetadataProviderHolder.setProviderForTest(null);
    }

    @Test
    void shouldDecorateProducerClientId() {
        AzMetadataProviderHolder.setProviderForTest(new FixedAzProvider("us-east-1a"));
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.CLIENT_ID_CONFIG, "producer-1");

        AzAwareClientConfigurator.maybeApplyProducerAz(props, "producer-1");

        assertEquals("automq_type=producer&automq_role=producer-1&automq_az=us-east-1a&producer-1",
                props.get(ProducerConfig.CLIENT_ID_CONFIG));
    }

    @Test
    void shouldPreserveCustomClientIdInAzConfig() {
        AzMetadataProviderHolder.setProviderForTest(new FixedAzProvider("us-east-1a"));
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.CLIENT_ID_CONFIG, "custom-id");

        AzAwareClientConfigurator.maybeApplyProducerAz(props, "producer-1");

        assertEquals("automq_type=producer&automq_role=producer-1&automq_az=us-east-1a&custom-id",
                props.get(ProducerConfig.CLIENT_ID_CONFIG));
    }

    @Test
    void shouldAssignRackForConsumers() {
        AzMetadataProviderHolder.setProviderForTest(new FixedAzProvider("us-west-2c"));
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer-1");

        AzAwareClientConfigurator.maybeApplyConsumerAz(props, "consumer-1");

        assertEquals("us-west-2c", props.get(ConsumerConfig.CLIENT_RACK_CONFIG));
    }

    @Test
    void shouldDecorateAdminClientId() {
        AzMetadataProviderHolder.setProviderForTest(new FixedAzProvider("eu-west-1b"));
        Map<String, Object> props = new HashMap<>();
        props.put(AdminClientConfig.CLIENT_ID_CONFIG, "admin-1");

        AzAwareClientConfigurator.maybeApplyAdminAz(props, "admin-1");

        assertEquals("automq_type=admin&automq_role=admin-1&automq_az=eu-west-1b&admin-1",
                props.get(AdminClientConfig.CLIENT_ID_CONFIG));
    }

    @Test
    void shouldLeaveClientIdWhenAzUnavailable() {
        AzMetadataProviderHolder.setProviderForTest(new AzMetadataProvider() {
            @Override
            public Optional<String> availabilityZoneId() {
                return Optional.empty();
            }
        });
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.CLIENT_ID_CONFIG, "producer-1");

        AzAwareClientConfigurator.maybeApplyProducerAz(props, "producer-1");

        assertEquals("producer-1", props.get(ProducerConfig.CLIENT_ID_CONFIG));
        assertFalse(props.containsKey(ConsumerConfig.CLIENT_RACK_CONFIG));
    }

    @Test
    void shouldEncodeSpecialCharactersInClientId() {
        AzMetadataProviderHolder.setProviderForTest(new FixedAzProvider("us-east-1a"));
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.CLIENT_ID_CONFIG, "client-with-spaces & symbols");

        AzAwareClientConfigurator.maybeApplyProducerAz(props, "test-role");

        assertEquals("automq_type=producer&automq_role=test-role&automq_az=us-east-1a&client-with-spaces & symbols",
                props.get(ProducerConfig.CLIENT_ID_CONFIG));
    }

    private static final class FixedAzProvider implements AzMetadataProvider {
        private final String az;

        private FixedAzProvider(String az) {
            this.az = az;
        }

        @Override
        public Optional<String> availabilityZoneId() {
            return Optional.ofNullable(az);
        }
    }
}

@ -1,133 +0,0 @@
|
|||
diff --git a/container/bitnami/Dockerfile b/container/bitnami/Dockerfile
|
||||
index 717a36c21f..ea5eb74efb 100644
|
||||
--- a/container/bitnami/Dockerfile
|
||||
+++ b/container/bitnami/Dockerfile
|
||||
@@ -1,21 +1,25 @@
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
|
||||
+FROM docker.io/bitnami/minideb:bookworm as extractor
|
||||
+
|
||||
+COPY automq-*.tgz /tmp/
|
||||
+RUN mkdir -p /output && \
|
||||
+ tar -zxf /tmp/automq-*.tgz -C /output --strip-components=1
|
||||
+
|
||||
FROM docker.io/bitnami/minideb:bookworm
|
||||
|
||||
ARG DOWNLOADS_URL="downloads.bitnami.com/files/stacksmith"
|
||||
ARG JAVA_EXTRA_SECURITY_DIR="/bitnami/java/extra-security"
|
||||
ARG TARGETARCH
|
||||
|
||||
-LABEL com.vmware.cp.artifact.flavor="sha256:c50c90cfd9d12b445b011e6ad529f1ad3daea45c26d20b00732fae3cd71f6a83" \
|
||||
- org.opencontainers.image.base.name="docker.io/bitnami/minideb:bookworm" \
|
||||
- org.opencontainers.image.created="2025-03-15T19:51:22Z" \
|
||||
- org.opencontainers.image.description="Application packaged by Broadcom, Inc." \
|
||||
- org.opencontainers.image.documentation="https://github.com/bitnami/containers/tree/main/bitnami/kafka/README.md" \
|
||||
+LABEL org.opencontainers.image.base.name="docker.io/bitnami/minideb:bookworm" \
|
||||
+ org.opencontainers.image.created="2025-04-27T21:51:40Z" \
|
||||
+ org.opencontainers.image.description="AutoMQ packaged by AutoMQ, Inc." \
|
||||
org.opencontainers.image.ref.name="3.9.0-debian-12-r13" \
|
||||
- org.opencontainers.image.source="https://github.com/bitnami/containers/tree/main/bitnami/kafka" \
|
||||
- org.opencontainers.image.title="kafka" \
|
||||
- org.opencontainers.image.vendor="Broadcom, Inc." \
|
||||
+ org.opencontainers.image.source="https://github.com/AutoMQ/automq/tree/main/container" \
|
||||
+ org.opencontainers.image.title="automq" \
|
||||
+ org.opencontainers.image.vendor="AutoMQ, Inc." \
|
||||
org.opencontainers.image.version="3.9.0"
|
||||
|
||||
ENV HOME="/" \
|
||||
@@ -26,12 +30,11 @@ ENV HOME="/" \
|
||||
COPY prebuildfs /
|
||||
SHELL ["/bin/bash", "-o", "errexit", "-o", "nounset", "-o", "pipefail", "-c"]
|
||||
# Install required system packages and dependencies
|
||||
-RUN install_packages ca-certificates curl procps zlib1g
|
||||
-RUN mkdir -p /tmp/bitnami/pkg/cache/ ; cd /tmp/bitnami/pkg/cache/ ; \
|
||||
+RUN apt-get update && apt-get install -y ca-certificates curl procps zlib1g libjemalloc-dev && \
|
||||
+ apt-get clean && rm -rf /var/lib/apt/lists /var/cache/apt/archives
|
||||
+RUN mkdir -p /tmp/bitnami/pkg/cache/ ; cd /tmp/bitnami/pkg/cache/ || exit 1 ; \
|
||||
COMPONENTS=( \
|
||||
- "wait-for-port-1.0.8-14-linux-${OS_ARCH}-debian-12" \
|
||||
"jre-17.0.14-10-1-linux-${OS_ARCH}-debian-12" \
|
||||
- "kafka-3.9.0-2-linux-${OS_ARCH}-debian-12" \
|
||||
) ; \
|
||||
for COMPONENT in "${COMPONENTS[@]}"; do \
|
||||
if [ ! -f "${COMPONENT}.tar.gz" ]; then \
|
||||
@@ -39,17 +42,18 @@ RUN mkdir -p /tmp/bitnami/pkg/cache/ ; cd /tmp/bitnami/pkg/cache/ ; \
|
||||
curl -SsLf "https://${DOWNLOADS_URL}/${COMPONENT}.tar.gz.sha256" -O ; \
|
||||
fi ; \
|
||||
sha256sum -c "${COMPONENT}.tar.gz.sha256" ; \
|
||||
- tar -zxf "${COMPONENT}.tar.gz" -C /opt/bitnami --strip-components=2 --no-same-owner --wildcards '*/files' ; \
|
||||
+ tar -zxf "${COMPONENT}.tar.gz" -C /opt/bitnami --strip-components=2 --no-same-owner ; \
|
||||
rm -rf "${COMPONENT}".tar.gz{,.sha256} ; \
|
||||
done
|
||||
-RUN apt-get update && apt-get upgrade -y && \
|
||||
- apt-get clean && rm -rf /var/lib/apt/lists /var/cache/apt/archives
|
||||
+COPY --from=extractor --chown=1001:0 /output /opt/bitnami/kafka
|
||||
RUN chmod g+rwX /opt/bitnami
|
||||
RUN find / -perm /6000 -type f -exec chmod a-s {} \; || true
|
||||
RUN ln -s /opt/bitnami/scripts/kafka/entrypoint.sh /entrypoint.sh
|
||||
RUN ln -s /opt/bitnami/scripts/kafka/run.sh /run.sh
|
||||
|
||||
COPY rootfs /
|
||||
+RUN find /opt/bitnami/scripts -type f -exec chmod g+rwX {} \; && \
|
||||
+ find /opt/bitnami/scripts -type f -exec chmod +x {} \;
|
||||
RUN /opt/bitnami/scripts/java/postunpack.sh
|
||||
RUN /opt/bitnami/scripts/kafka/postunpack.sh
|
||||
ENV APP_VERSION="3.9.0" \
|
||||
@@ -59,6 +63,12 @@ ENV APP_VERSION="3.9.0" \
|
||||
|
||||
EXPOSE 9092
|
||||
|
||||
+RUN arch=$(uname -m) \
|
||||
+ && target_file="/usr/lib/${arch}-linux-gnu/libjemalloc.so" \
|
||||
+ && { test -f "$target_file" || { echo "Error: $target_file not found"; exit 1; }; } \
|
||||
+ && ln -sv "$target_file" /usr/lib/libjemalloc.so
|
||||
+ENV LD_PRELOAD="/usr/lib/libjemalloc.so"
|
||||
+
|
||||
USER 1001
|
||||
ENTRYPOINT [ "/opt/bitnami/scripts/kafka/entrypoint.sh" ]
|
||||
CMD [ "/opt/bitnami/scripts/kafka/run.sh" ]
|
||||
diff --git a/container/bitnami/prebuildfs/opt/bitnami/scripts/libbitnami.sh b/container/bitnami/prebuildfs/opt/bitnami/scripts/libbitnami.sh
|
||||
index 00d053b521..09e3d3084d 100644
|
||||
--- a/container/bitnami/prebuildfs/opt/bitnami/scripts/libbitnami.sh
|
||||
+++ b/container/bitnami/prebuildfs/opt/bitnami/scripts/libbitnami.sh
|
||||
@@ -42,12 +42,13 @@ print_welcome_page() {
|
||||
# None
|
||||
#########################
|
||||
print_image_welcome_page() {
|
||||
- local github_url="https://github.com/bitnami/containers"
|
||||
+ local docs_url="https://www.automq.com/docs/automq/deployment/deploy-multi-nodes-cluster-on-kubernetes"
|
||||
|
||||
info ""
|
||||
- info "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}"
|
||||
- info "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}"
|
||||
- info "Did you know there are enterprise versions of the Bitnami catalog? For enhanced secure software supply chain features, unlimited pulls from Docker, LTS support, or application customization, see Bitnami Premium or Tanzu Application Catalog. See https://www.arrow.com/globalecs/na/vendors/bitnami/ for more information."
|
||||
+ info "${BOLD}Welcome to the AutoMQ for Apache Kafka on Bitnami Container${RESET}"
|
||||
+ info "${BOLD}This image is compatible with Bitnami's container standards.${RESET}"
|
||||
+ info "Refer to the documentation for complete configuration and Kubernetes deployment guidelines:"
|
||||
+ info "${BOLD}${docs_url}${RESET}"
|
||||
info ""
|
||||
}
|
||||
|
||||
diff --git a/container/bitnami/rootfs/opt/bitnami/scripts/kafka/postunpack.sh b/container/bitnami/rootfs/opt/bitnami/scripts/kafka/postunpack.sh
|
||||
index 7255563236..673c84e721 100644
|
||||
--- a/container/bitnami/rootfs/opt/bitnami/scripts/kafka/postunpack.sh
|
||||
+++ b/container/bitnami/rootfs/opt/bitnami/scripts/kafka/postunpack.sh
|
||||
@@ -35,12 +35,12 @@ mv "${KAFKA_CONF_DIR}/server.properties" "${KAFKA_CONF_DIR}/server.properties.or
|
||||
|
||||
# Disable logging to stdout and garbage collection
|
||||
# Source: https://logging.apache.org/log4j/log4j-2.4/manual/appenders.html
|
||||
-replace_in_file "${KAFKA_BASE_DIR}/bin/kafka-server-start.sh" " [-]loggc" " "
|
||||
-replace_in_file "${KAFKA_CONF_DIR}/log4j.properties" "DailyRollingFileAppender" "ConsoleAppender"
|
||||
+#replace_in_file "${KAFKA_BASE_DIR}/bin/kafka-server-start.sh" " [-]loggc" " "
|
||||
+#replace_in_file "${KAFKA_CONF_DIR}/log4j.properties" "DailyRollingFileAppender" "ConsoleAppender"
|
||||
|
||||
# Disable the default console logger in favour of KafkaAppender (which provides the exact output)
|
||||
-echo "log4j.appender.stdout.Threshold=OFF" >>"${KAFKA_CONF_DIR}/log4j.properties"
|
||||
+#echo "log4j.appender.stdout.Threshold=OFF" >>"${KAFKA_CONF_DIR}/log4j.properties"
|
||||
|
||||
# Remove invalid parameters for ConsoleAppender
|
||||
-remove_in_file "${KAFKA_CONF_DIR}/log4j.properties" "DatePattern"
|
||||
-remove_in_file "${KAFKA_CONF_DIR}/log4j.properties" "Appender.File"
|
||||
+#remove_in_file "${KAFKA_CONF_DIR}/log4j.properties" "DatePattern"
|
||||
+#remove_in_file "${KAFKA_CONF_DIR}/log4j.properties" "Appender.File"
|
||||
|
|
@ -1,74 +0,0 @@
|
|||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
|
||||
FROM docker.io/bitnami/minideb:bookworm as extractor
|
||||
|
||||
COPY automq-*.tgz /tmp/
|
||||
RUN mkdir -p /output && \
|
||||
tar -zxf /tmp/automq-*.tgz -C /output --strip-components=1
|
||||
|
||||
FROM docker.io/bitnami/minideb:bookworm
|
||||
|
||||
ARG DOWNLOADS_URL="downloads.bitnami.com/files/stacksmith"
|
||||
ARG JAVA_EXTRA_SECURITY_DIR="/bitnami/java/extra-security"
|
||||
ARG TARGETARCH
|
||||
|
||||
LABEL org.opencontainers.image.base.name="docker.io/bitnami/minideb:bookworm" \
|
||||
org.opencontainers.image.created="2025-04-27T21:51:40Z" \
|
||||
org.opencontainers.image.description="AutoMQ packaged by AutoMQ, Inc." \
|
||||
org.opencontainers.image.ref.name="3.9.0-debian-12-r13" \
|
||||
org.opencontainers.image.source="https://github.com/AutoMQ/automq/tree/main/container" \
|
||||
org.opencontainers.image.title="automq" \
|
||||
org.opencontainers.image.vendor="AutoMQ, Inc." \
|
||||
org.opencontainers.image.version="3.9.0"
|
||||
|
||||
ENV HOME="/" \
|
||||
OS_ARCH="${TARGETARCH:-amd64}" \
|
||||
OS_FLAVOUR="debian-12" \
|
||||
OS_NAME="linux"
|
||||
|
||||
COPY prebuildfs /
|
||||
SHELL ["/bin/bash", "-o", "errexit", "-o", "nounset", "-o", "pipefail", "-c"]
|
||||
# Install required system packages and dependencies
|
||||
RUN apt-get update && apt-get install -y ca-certificates curl procps zlib1g libjemalloc-dev && \
|
||||
apt-get clean && rm -rf /var/lib/apt/lists /var/cache/apt/archives
|
||||
RUN mkdir -p /tmp/bitnami/pkg/cache/ ; cd /tmp/bitnami/pkg/cache/ || exit 1 ; \
|
||||
COMPONENTS=( \
|
||||
"jre-17.0.14-10-1-linux-${OS_ARCH}-debian-12" \
|
||||
) ; \
|
||||
for COMPONENT in "${COMPONENTS[@]}"; do \
|
||||
if [ ! -f "${COMPONENT}.tar.gz" ]; then \
|
||||
curl -SsLf "https://${DOWNLOADS_URL}/${COMPONENT}.tar.gz" -O ; \
|
||||
curl -SsLf "https://${DOWNLOADS_URL}/${COMPONENT}.tar.gz.sha256" -O ; \
|
||||
fi ; \
|
||||
sha256sum -c "${COMPONENT}.tar.gz.sha256" ; \
|
||||
tar -zxf "${COMPONENT}.tar.gz" -C /opt/bitnami --strip-components=2 --no-same-owner ; \
|
||||
rm -rf "${COMPONENT}".tar.gz{,.sha256} ; \
|
||||
done
|
||||
COPY --from=extractor --chown=1001:0 /output /opt/bitnami/kafka
|
||||
RUN chmod g+rwX /opt/bitnami
|
||||
RUN find / -perm /6000 -type f -exec chmod a-s {} \; || true
|
||||
RUN ln -s /opt/bitnami/scripts/kafka/entrypoint.sh /entrypoint.sh
|
||||
RUN ln -s /opt/bitnami/scripts/kafka/run.sh /run.sh
|
||||
|
||||
COPY rootfs /
|
||||
RUN find /opt/bitnami/scripts -type f -exec chmod g+rwX {} \; && \
|
||||
find /opt/bitnami/scripts -type f -exec chmod +x {} \;
|
||||
RUN /opt/bitnami/scripts/java/postunpack.sh
|
||||
RUN /opt/bitnami/scripts/kafka/postunpack.sh
|
||||
ENV APP_VERSION="3.9.0" \
|
||||
BITNAMI_APP_NAME="kafka" \
|
||||
JAVA_HOME="/opt/bitnami/java" \
|
||||
PATH="/opt/bitnami/common/bin:/opt/bitnami/java/bin:/opt/bitnami/kafka/bin:$PATH"
|
||||
|
||||
EXPOSE 9092
|
||||
|
||||
RUN arch=$(uname -m) \
|
||||
&& target_file="/usr/lib/${arch}-linux-gnu/libjemalloc.so" \
|
||||
&& { test -f "$target_file" || { echo "Error: $target_file not found"; exit 1; }; } \
|
||||
&& ln -sv "$target_file" /usr/lib/libjemalloc.so
|
||||
ENV LD_PRELOAD="/usr/lib/libjemalloc.so"
|
||||
|
||||
USER 1001
|
||||
ENTRYPOINT [ "/opt/bitnami/scripts/kafka/entrypoint.sh" ]
|
||||
CMD [ "/opt/bitnami/scripts/kafka/run.sh" ]
@ -1,24 +0,0 @@
|
|||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
|
||||
services:
|
||||
kafka:
|
||||
image: docker.io/bitnami/kafka:3.9
|
||||
ports:
|
||||
- "9092:9092"
|
||||
volumes:
|
||||
- "kafka_data:/bitnami"
|
||||
environment:
|
||||
# KRaft settings
|
||||
- KAFKA_CFG_NODE_ID=0
|
||||
- KAFKA_CFG_PROCESS_ROLES=controller,broker
|
||||
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093
|
||||
# Listeners
|
||||
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093
|
||||
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://:9092
|
||||
- KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
|
||||
- KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
|
||||
- KAFKA_CFG_INTER_BROKER_LISTENER_NAME=PLAINTEXT
|
||||
volumes:
|
||||
kafka_data:
|
||||
driver: local
|
||||
|
|
@ -1,20 +0,0 @@
|
|||
{
|
||||
"jre": {
|
||||
"arch": "amd64",
|
||||
"distro": "debian-12",
|
||||
"type": "NAMI",
|
||||
"version": "17.0.14-10-1"
|
||||
},
|
||||
"kafka": {
|
||||
"arch": "amd64",
|
||||
"distro": "debian-12",
|
||||
"type": "NAMI",
|
||||
"version": "3.9.0-2"
|
||||
},
|
||||
"wait-for-port": {
|
||||
"arch": "amd64",
|
||||
"distro": "debian-12",
|
||||
"type": "NAMI",
|
||||
"version": "1.0.8-14"
|
||||
}
|
||||
}
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
Bitnami containers ship with software bundles. You can find the licenses under:
|
||||
/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt
|
||||
|
|
@ -1,54 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
#
|
||||
# Bitnami custom library
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
|
||||
# Constants
|
||||
BOLD='\033[1m'
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Print the welcome page
|
||||
# Globals:
|
||||
# DISABLE_WELCOME_MESSAGE
|
||||
# BITNAMI_APP_NAME
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
print_welcome_page() {
|
||||
if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then
|
||||
if [[ -n "$BITNAMI_APP_NAME" ]]; then
|
||||
print_image_welcome_page
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Print the welcome page for a Bitnami Docker image
|
||||
# Globals:
|
||||
# BITNAMI_APP_NAME
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
print_image_welcome_page() {
|
||||
local docs_url="https://www.automq.com/docs/automq/deployment/deploy-multi-nodes-cluster-on-kubernetes"
|
||||
|
||||
info ""
|
||||
info "${BOLD}Welcome to the AutoMQ for Apache Kafka on Bitnami Container${RESET}"
|
||||
info "${BOLD}This image is compatible with Bitnami's container standards.${RESET}"
|
||||
info "Refer to the documentation for complete configuration and Kubernetes deployment guidelines:"
|
||||
info "${BOLD}${docs_url}${RESET}"
|
||||
info ""
|
||||
}
|
||||
|
||||
|
|
@ -1,141 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
#
|
||||
# Library for managing files
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/libos.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Replace a regex-matching string in a file
|
||||
# Arguments:
|
||||
# $1 - filename
|
||||
# $2 - match regex
|
||||
# $3 - substitute regex
|
||||
# $4 - use POSIX regex. Default: true
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
replace_in_file() {
|
||||
local filename="${1:?filename is required}"
|
||||
local match_regex="${2:?match regex is required}"
|
||||
local substitute_regex="${3:?substitute regex is required}"
|
||||
local posix_regex=${4:-true}
|
||||
|
||||
local result
|
||||
|
||||
# We should avoid using 'sed in-place' substitutions
|
||||
# 1) They are not compatible with files mounted from ConfigMap(s)
|
||||
# 2) We found incompatibility issues with Debian10 and "in-place" substitutions
|
||||
local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
|
||||
if [[ $posix_regex = true ]]; then
|
||||
result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
|
||||
else
|
||||
result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
|
||||
fi
|
||||
echo "$result" > "$filename"
|
||||
}
|
||||
|
||||
########################
|
||||
# Replace a regex-matching multiline string in a file
|
||||
# Arguments:
|
||||
# $1 - filename
|
||||
# $2 - match regex
|
||||
# $3 - substitute regex
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
replace_in_file_multiline() {
|
||||
local filename="${1:?filename is required}"
|
||||
local match_regex="${2:?match regex is required}"
|
||||
local substitute_regex="${3:?substitute regex is required}"
|
||||
|
||||
local result
|
||||
local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
|
||||
result="$(perl -pe "BEGIN{undef $/;} s${del}${match_regex}${del}${substitute_regex}${del}sg" "$filename")"
|
||||
echo "$result" > "$filename"
|
||||
}
|
||||
|
||||
########################
|
||||
# Remove a line in a file based on a regex
|
||||
# Arguments:
|
||||
# $1 - filename
|
||||
# $2 - match regex
|
||||
# $3 - use POSIX regex. Default: true
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
remove_in_file() {
|
||||
local filename="${1:?filename is required}"
|
||||
local match_regex="${2:?match regex is required}"
|
||||
local posix_regex=${3:-true}
|
||||
local result
|
||||
|
||||
# We should avoid using 'sed in-place' substitutions
|
||||
# 1) They are not compatible with files mounted from ConfigMap(s)
|
||||
# 2) We found incompatibility issues with Debian10 and "in-place" substitutions
|
||||
if [[ $posix_regex = true ]]; then
|
||||
result="$(sed -E "/$match_regex/d" "$filename")"
|
||||
else
|
||||
result="$(sed "/$match_regex/d" "$filename")"
|
||||
fi
|
||||
echo "$result" > "$filename"
|
||||
}
|
||||
|
||||
########################
|
||||
# Appends text after the last line matching a pattern
|
||||
# Arguments:
|
||||
# $1 - file
|
||||
# $2 - match regex
|
||||
# $3 - contents to add
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
append_file_after_last_match() {
|
||||
local file="${1:?missing file}"
|
||||
local match_regex="${2:?missing pattern}"
|
||||
local value="${3:?missing value}"
|
||||
|
||||
# We read the file in reverse, replace the first match (0,/pattern/s) and then reverse the results again
|
||||
result="$(tac "$file" | sed -E "0,/($match_regex)/s||${value}\n\1|" | tac)"
|
||||
echo "$result" > "$file"
|
||||
}
|
||||
|
||||
########################
|
||||
# Wait until certain entry is present in a log file
|
||||
# Arguments:
|
||||
# $1 - entry to look for
|
||||
# $2 - log file
|
||||
# $3 - max retries. Default: 12
|
||||
# $4 - sleep between retries (in seconds). Default: 5
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
wait_for_log_entry() {
|
||||
local -r entry="${1:-missing entry}"
|
||||
local -r log_file="${2:-missing log file}"
|
||||
local -r retries="${3:-12}"
|
||||
local -r interval_time="${4:-5}"
|
||||
local attempt=0
|
||||
|
||||
check_log_file_for_entry() {
|
||||
if ! grep -qE "$entry" "$log_file"; then
|
||||
debug "Entry \"${entry}\" still not present in ${log_file} (attempt $((++attempt))/${retries})"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
debug "Checking that ${log_file} log file contains entry \"${entry}\""
|
||||
if retry_while check_log_file_for_entry "$retries" "$interval_time"; then
|
||||
debug "Found entry \"${entry}\" in ${log_file}"
|
||||
true
|
||||
else
|
||||
error "Could not find entry \"${entry}\" in ${log_file} after ${retries} retries"
|
||||
debug_execute cat "$log_file"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
|
@ -1,193 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
#
|
||||
# Library for file system actions
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Ensure a file/directory is owned (user and group) but the given user
|
||||
# Arguments:
|
||||
# $1 - filepath
|
||||
# $2 - owner
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
owned_by() {
|
||||
local path="${1:?path is missing}"
|
||||
local owner="${2:?owner is missing}"
|
||||
local group="${3:-}"
|
||||
|
||||
if [[ -n $group ]]; then
|
||||
chown "$owner":"$group" "$path"
|
||||
else
|
||||
chown "$owner":"$owner" "$path"
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Ensure a directory exists and, optionally, is owned by the given user
|
||||
# Arguments:
|
||||
# $1 - directory
|
||||
# $2 - owner
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
ensure_dir_exists() {
|
||||
local dir="${1:?directory is missing}"
|
||||
local owner_user="${2:-}"
|
||||
local owner_group="${3:-}"
|
||||
|
||||
[ -d "${dir}" ] || mkdir -p "${dir}"
|
||||
if [[ -n $owner_user ]]; then
|
||||
owned_by "$dir" "$owner_user" "$owner_group"
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Checks whether a directory is empty or not
|
||||
# arguments:
|
||||
# $1 - directory
|
||||
# returns:
|
||||
# boolean
|
||||
#########################
|
||||
is_dir_empty() {
|
||||
local -r path="${1:?missing directory}"
|
||||
# Calculate real path in order to avoid issues with symlinks
|
||||
local -r dir="$(realpath "$path")"
|
||||
if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Checks whether a mounted directory is empty or not
|
||||
# arguments:
|
||||
# $1 - directory
|
||||
# returns:
|
||||
# boolean
|
||||
#########################
|
||||
is_mounted_dir_empty() {
|
||||
local dir="${1:?missing directory}"
|
||||
|
||||
if is_dir_empty "$dir" || find "$dir" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" -exec false {} +; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Checks whether a file can be written to or not
|
||||
# arguments:
|
||||
# $1 - file
|
||||
# returns:
|
||||
# boolean
|
||||
#########################
|
||||
is_file_writable() {
|
||||
local file="${1:?missing file}"
|
||||
local dir
|
||||
dir="$(dirname "$file")"
|
||||
|
||||
if [[ (-f "$file" && -w "$file") || (! -f "$file" && -d "$dir" && -w "$dir") ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Relativize a path
|
||||
# arguments:
|
||||
# $1 - path
|
||||
# $2 - base
|
||||
# returns:
|
||||
# None
|
||||
#########################
|
||||
relativize() {
|
||||
local -r path="${1:?missing path}"
|
||||
local -r base="${2:?missing base}"
|
||||
pushd "$base" >/dev/null || exit
|
||||
realpath -q --no-symlinks --relative-base="$base" "$path" | sed -e 's|^/$|.|' -e 's|^/||'
|
||||
popd >/dev/null || exit
|
||||
}
|
||||
|
||||
########################
|
||||
# Configure permissions and ownership recursively
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# $1 - paths (as a string).
|
||||
# Flags:
|
||||
# -f|--file-mode - mode for directories.
|
||||
# -d|--dir-mode - mode for files.
|
||||
# -u|--user - user
|
||||
# -g|--group - group
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
configure_permissions_ownership() {
|
||||
local -r paths="${1:?paths is missing}"
|
||||
local dir_mode=""
|
||||
local file_mode=""
|
||||
local user=""
|
||||
local group=""
|
||||
|
||||
# Validate arguments
|
||||
shift 1
|
||||
while [ "$#" -gt 0 ]; do
|
||||
case "$1" in
|
||||
-f | --file-mode)
|
||||
shift
|
||||
file_mode="${1:?missing mode for files}"
|
||||
;;
|
||||
-d | --dir-mode)
|
||||
shift
|
||||
dir_mode="${1:?missing mode for directories}"
|
||||
;;
|
||||
-u | --user)
|
||||
shift
|
||||
user="${1:?missing user}"
|
||||
;;
|
||||
-g | --group)
|
||||
shift
|
||||
group="${1:?missing group}"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid command line flag $1" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
read -r -a filepaths <<<"$paths"
|
||||
for p in "${filepaths[@]}"; do
|
||||
if [[ -e "$p" ]]; then
|
||||
find -L "$p" -printf ""
|
||||
if [[ -n $dir_mode ]]; then
|
||||
find -L "$p" -type d ! -perm "$dir_mode" -print0 | xargs -r -0 chmod "$dir_mode"
|
||||
fi
|
||||
if [[ -n $file_mode ]]; then
|
||||
find -L "$p" -type f ! -perm "$file_mode" -print0 | xargs -r -0 chmod "$file_mode"
|
||||
fi
|
||||
if [[ -n $user ]] && [[ -n $group ]]; then
|
||||
find -L "$p" -print0 | xargs -r -0 chown "${user}:${group}"
|
||||
elif [[ -n $user ]] && [[ -z $group ]]; then
|
||||
find -L "$p" -print0 | xargs -r -0 chown "${user}"
|
||||
elif [[ -z $user ]] && [[ -n $group ]]; then
|
||||
find -L "$p" -print0 | xargs -r -0 chgrp "${group}"
|
||||
fi
|
||||
else
|
||||
stderr_print "$p does not exist"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
#
|
||||
# Library to use for scripts expected to be used as Kubernetes lifecycle hooks
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load generic libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
. /opt/bitnami/scripts/libos.sh
|
||||
|
||||
# Override functions that log to stdout/stderr of the current process, so they print to process 1
|
||||
for function_to_override in stderr_print debug_execute; do
|
||||
# Output is sent to output of process 1 and thus end up in the container log
|
||||
# The hook output in general isn't saved
|
||||
eval "$(declare -f "$function_to_override") >/proc/1/fd/1 2>/proc/1/fd/2"
|
||||
done
|
||||
|
|
@ -1,146 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
#
|
||||
# Library for logging functions
|
||||
|
||||
# Constants
|
||||
RESET='\033[0m'
|
||||
RED='\033[38;5;1m'
|
||||
GREEN='\033[38;5;2m'
|
||||
YELLOW='\033[38;5;3m'
|
||||
MAGENTA='\033[38;5;5m'
|
||||
CYAN='\033[38;5;6m'
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Print to STDERR
|
||||
# Arguments:
|
||||
# Message to print
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
stderr_print() {
|
||||
# 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
|
||||
local bool="${BITNAMI_QUIET:-false}"
|
||||
# comparison is performed without regard to the case of alphabetic characters
|
||||
shopt -s nocasematch
|
||||
if ! [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
|
||||
printf "%b\\n" "${*}" >&2
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Log message
|
||||
# Arguments:
|
||||
# Message to log
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
log() {
|
||||
local color_bool="${BITNAMI_COLOR:-true}"
|
||||
# comparison is performed without regard to the case of alphabetic characters
|
||||
shopt -s nocasematch
|
||||
if [[ "$color_bool" = 1 || "$color_bool" =~ ^(yes|true)$ ]]; then
|
||||
stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}"
|
||||
else
|
||||
stderr_print "${MODULE:-} $(date "+%T.%2N ")${*}"
|
||||
fi
|
||||
}
|
||||
########################
|
||||
# Log an 'info' message
|
||||
# Arguments:
|
||||
# Message to log
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
info() {
|
||||
local msg_color=""
|
||||
local color_bool="${BITNAMI_COLOR:-true}"
|
||||
# comparison is performed without regard to the case of alphabetic characters
|
||||
shopt -s nocasematch
|
||||
if [[ "$color_bool" = 1 || "$color_bool" =~ ^(yes|true)$ ]];then
|
||||
msg_color="$GREEN"
|
||||
fi
|
||||
log "${msg_color}INFO ${RESET} ==> ${*}"
|
||||
}
|
||||
########################
|
||||
# Log message
|
||||
# Arguments:
|
||||
# Message to log
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
warn() {
|
||||
local msg_color=""
|
||||
local color_bool="${BITNAMI_COLOR:-true}"
|
||||
# comparison is performed without regard to the case of alphabetic characters
|
||||
shopt -s nocasematch
|
||||
if [[ "$color_bool" = 1 || "$color_bool" =~ ^(yes|true)$ ]];then
|
||||
msg_color="$YELLOW"
|
||||
fi
|
||||
log "${msg_color}WARN ${RESET} ==> ${*}"
|
||||
}
|
||||
########################
|
||||
# Log an 'error' message
|
||||
# Arguments:
|
||||
# Message to log
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
error() {
|
||||
local msg_color=""
|
||||
local color_bool="${BITNAMI_COLOR:-true}"
|
||||
# comparison is performed without regard to the case of alphabetic characters
|
||||
shopt -s nocasematch
|
||||
if [[ "$color_bool" = 1 || "$color_bool" =~ ^(yes|true)$ ]];then
|
||||
msg_color="$RED"
|
||||
fi
|
||||
log "${msg_color}ERROR${RESET} ==> ${*}"
|
||||
}
|
||||
########################
|
||||
# Log a 'debug' message
|
||||
# Globals:
|
||||
# BITNAMI_DEBUG
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
debug() {
|
||||
local msg_color=""
|
||||
local color_bool="${BITNAMI_COLOR:-true}"
|
||||
# comparison is performed without regard to the case of alphabetic characters
|
||||
shopt -s nocasematch
|
||||
if [[ "$color_bool" = 1 || "$color_bool" =~ ^(yes|true)$ ]] ;then
|
||||
msg_color="$MAGENTA"
|
||||
fi
|
||||
local debug_bool="${BITNAMI_DEBUG:-false}"
|
||||
if [[ "$debug_bool" = 1 || "$debug_bool" =~ ^(yes|true)$ ]]; then
|
||||
log "${msg_color}DEBUG${RESET} ==> ${*}"
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Indent a string
|
||||
# Arguments:
|
||||
# $1 - string
|
||||
# $2 - number of indentation characters (default: 4)
|
||||
# $3 - indentation character (default: " ")
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
indent() {
|
||||
local string="${1:-}"
|
||||
local num="${2:?missing num}"
|
||||
local char="${3:-" "}"
|
||||
# Build the indentation unit string
|
||||
local indent_unit=""
|
||||
for ((i = 0; i < num; i++)); do
|
||||
indent_unit="${indent_unit}${char}"
|
||||
done
|
||||
# shellcheck disable=SC2001
|
||||
# Complex regex, see https://github.com/koalaman/shellcheck/wiki/SC2001#exceptions
|
||||
echo "$string" | sed "s/^/${indent_unit}/"
|
||||
}
|
||||
|
|
@ -1,171 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
#
|
||||
# Library for network functions
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
. /opt/bitnami/scripts/libvalidations.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Resolve IP address for a host/domain (i.e. DNS lookup)
|
||||
# Arguments:
|
||||
# $1 - Hostname to resolve
|
||||
# $2 - IP address version (v4, v6), leave empty for resolving to any version
|
||||
# Returns:
|
||||
# IP
|
||||
#########################
|
||||
dns_lookup() {
|
||||
local host="${1:?host is missing}"
|
||||
local ip_version="${2:-}"
|
||||
getent "ahosts${ip_version}" "$host" | awk '/STREAM/ {print $1 }' | head -n 1
|
||||
}
|
||||
|
||||
#########################
|
||||
# Wait for a hostname and return the IP
|
||||
# Arguments:
|
||||
# $1 - hostname
|
||||
# $2 - number of retries
|
||||
# $3 - seconds to wait between retries
|
||||
# Returns:
|
||||
# - IP address that corresponds to the hostname
|
||||
#########################
|
||||
wait_for_dns_lookup() {
|
||||
local hostname="${1:?hostname is missing}"
|
||||
local retries="${2:-5}"
|
||||
local seconds="${3:-1}"
|
||||
check_host() {
|
||||
if [[ $(dns_lookup "$hostname") == "" ]]; then
|
||||
false
|
||||
else
|
||||
true
|
||||
fi
|
||||
}
|
||||
# Wait for the host to be ready
|
||||
retry_while "check_host ${hostname}" "$retries" "$seconds"
|
||||
dns_lookup "$hostname"
|
||||
}
|
||||
|
||||
########################
|
||||
# Get machine's IP
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# Machine IP
|
||||
#########################
|
||||
get_machine_ip() {
|
||||
local -a ip_addresses
|
||||
local hostname
|
||||
hostname="$(hostname)"
|
||||
read -r -a ip_addresses <<< "$(dns_lookup "$hostname" | xargs echo)"
|
||||
if [[ "${#ip_addresses[@]}" -gt 1 ]]; then
|
||||
warn "Found more than one IP address associated to hostname ${hostname}: ${ip_addresses[*]}, will use ${ip_addresses[0]}"
|
||||
elif [[ "${#ip_addresses[@]}" -lt 1 ]]; then
|
||||
error "Could not find any IP address associated to hostname ${hostname}"
|
||||
exit 1
|
||||
fi
|
||||
# Check if the first IP address is IPv6 to add brackets
|
||||
if validate_ipv6 "${ip_addresses[0]}" ; then
|
||||
echo "[${ip_addresses[0]}]"
|
||||
else
|
||||
echo "${ip_addresses[0]}"
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if the provided argument is a resolved hostname
|
||||
# Arguments:
|
||||
# $1 - Value to check
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_hostname_resolved() {
|
||||
local -r host="${1:?missing value}"
|
||||
if [[ -n "$(dns_lookup "$host")" ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Parse URL
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# $1 - uri - String
|
||||
# $2 - component to obtain. Valid options (scheme, authority, userinfo, host, port, path, query or fragment) - String
|
||||
# Returns:
|
||||
# String
|
||||
parse_uri() {
|
||||
local uri="${1:?uri is missing}"
|
||||
local component="${2:?component is missing}"
|
||||
|
||||
# Solution based on https://tools.ietf.org/html/rfc3986#appendix-B with
|
||||
# additional sub-expressions to split authority into userinfo, host and port
|
||||
# Credits to Patryk Obara (see https://stackoverflow.com/a/45977232/6694969)
|
||||
local -r URI_REGEX='^(([^:/?#]+):)?(//((([^@/?#]+)@)?([^:/?#]+)(:([0-9]+))?))?(/([^?#]*))?(\?([^#]*))?(#(.*))?'
|
||||
# || | ||| | | | | | | | | |
|
||||
# |2 scheme | ||6 userinfo 7 host | 9 port | 11 rpath | 13 query | 15 fragment
|
||||
# 1 scheme: | |5 userinfo@ 8 :... 10 path 12 ?... 14 #...
|
||||
# | 4 authority
|
||||
# 3 //...
|
||||
local index=0
|
||||
case "$component" in
|
||||
scheme)
|
||||
index=2
|
||||
;;
|
||||
authority)
|
||||
index=4
|
||||
;;
|
||||
userinfo)
|
||||
index=6
|
||||
;;
|
||||
host)
|
||||
index=7
|
||||
;;
|
||||
port)
|
||||
index=9
|
||||
;;
|
||||
path)
|
||||
index=10
|
||||
;;
|
||||
query)
|
||||
index=13
|
||||
;;
|
||||
fragment)
|
||||
index=14
|
||||
;;
|
||||
*)
|
||||
stderr_print "unrecognized component $component"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
[[ "$uri" =~ $URI_REGEX ]] && echo "${BASH_REMATCH[${index}]}"
|
||||
}
|
||||
|
||||
########################
|
||||
# Wait for a HTTP connection to succeed
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# $1 - URL to wait for
|
||||
# $2 - Maximum amount of retries (optional)
|
||||
# $3 - Time between retries (optional)
|
||||
# Returns:
|
||||
# true if the HTTP connection succeeded, false otherwise
|
||||
#########################
|
||||
wait_for_http_connection() {
|
||||
local url="${1:?missing url}"
|
||||
local retries="${2:-}"
|
||||
local sleep_time="${3:-}"
|
||||
if ! retry_while "debug_execute curl --silent ${url}" "$retries" "$sleep_time"; then
|
||||
error "Could not connect to ${url}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
|
@ -1,657 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
#
|
||||
# Library for operating system actions
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
. /opt/bitnami/scripts/libfs.sh
|
||||
. /opt/bitnami/scripts/libvalidations.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Check if an user exists in the system
|
||||
# Arguments:
|
||||
# $1 - user
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
user_exists() {
|
||||
local user="${1:?user is missing}"
|
||||
id "$user" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if a group exists in the system
|
||||
# Arguments:
|
||||
# $1 - group
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
group_exists() {
|
||||
local group="${1:?group is missing}"
|
||||
getent group "$group" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
########################
# Create a group in the system if it does not exist already
# Arguments:
#   $1 - group
# Flags:
#   -i|--gid - the ID for the new group
#   -s|--system - whether to create the new group as a system group (gid <= 999)
# Returns:
#   None
#########################
ensure_group_exists() {
    local group="${1:?group is missing}"
    local gid=""
    local is_system_user=false

    # Validate arguments
    shift 1
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -i | --gid)
                shift
                gid="${1:?missing gid}"
                ;;
            -s | --system)
                is_system_user=true
                ;;
            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done

    if ! group_exists "$group"; then
        local -a args=("$group")
        if [[ -n "$gid" ]]; then
            if group_exists "$gid"; then
                error "The GID $gid is already in use." >&2
                return 1
            fi
            args+=("--gid" "$gid")
        fi
        $is_system_user && args+=("--system")
        groupadd "${args[@]}" >/dev/null 2>&1
    fi
}
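
# Usage sketch: create a regular group and a system group, assuming this library
# has been sourced and the caller can run groupadd. Group names and the GID are
# placeholders.
example_create_groups() {
    ensure_group_exists "app"                        # created only if missing
    ensure_group_exists "daemons" --gid 990 --system
}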

########################
# Create a user in the system if it does not exist already
# Arguments:
#   $1 - user
# Flags:
#   -i|--uid - the ID for the new user
#   -g|--group - the group the new user should belong to
#   -a|--append-groups - comma-separated list of supplemental groups to append to the new user
#   -h|--home - the home directory for the new user
#   -s|--system - whether to create the new user as a system user (uid <= 999)
# Returns:
#   None
#########################
ensure_user_exists() {
    local user="${1:?user is missing}"
    local uid=""
    local group=""
    local append_groups=""
    local home=""
    local is_system_user=false

    # Validate arguments
    shift 1
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -i | --uid)
                shift
                uid="${1:?missing uid}"
                ;;
            -g | --group)
                shift
                group="${1:?missing group}"
                ;;
            -a | --append-groups)
                shift
                append_groups="${1:?missing append_groups}"
                ;;
            -h | --home)
                shift
                home="${1:?missing home directory}"
                ;;
            -s | --system)
                is_system_user=true
                ;;
            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done

    if ! user_exists "$user"; then
        local -a user_args=("-N" "$user")
        if [[ -n "$uid" ]]; then
            if user_exists "$uid"; then
                error "The UID $uid is already in use."
                return 1
            fi
            user_args+=("--uid" "$uid")
        else
            $is_system_user && user_args+=("--system")
        fi
        useradd "${user_args[@]}" >/dev/null 2>&1
    fi

    if [[ -n "$group" ]]; then
        local -a group_args=("$group")
        $is_system_user && group_args+=("--system")
        ensure_group_exists "${group_args[@]}"
        usermod -g "$group" "$user" >/dev/null 2>&1
    fi

    if [[ -n "$append_groups" ]]; then
        local -a groups
        read -ra groups <<<"$(tr ',;' ' ' <<<"$append_groups")"
        for group in "${groups[@]}"; do
            ensure_group_exists "$group"
            usermod -aG "$group" "$user" >/dev/null 2>&1
        done
    fi

    if [[ -n "$home" ]]; then
        mkdir -p "$home"
        usermod -d "$home" "$user" >/dev/null 2>&1
        configure_permissions_ownership "$home" -d "775" -f "664" -u "$user" -g "$group"
    fi
}
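
# Usage sketch: create a service account with a primary group, supplemental
# groups and a home directory. Assumes this library has been sourced and the
# caller is root; the name, UID, groups and path are placeholders.
example_create_service_user() {
    ensure_user_exists "myapp" \
        --uid 1001 \
        --group "myapp" \
        --append-groups "adm,disk" \
        --home "/home/myapp" \
        --system
}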

########################
# Check if the script is currently running as root
# Arguments:
#   None
# Returns:
#   Boolean
#########################
am_i_root() {
    if [[ "$(id -u)" = "0" ]]; then
        true
    else
        false
    fi
}

########################
# Print OS metadata
# Arguments:
#   $1 - Flag name
# Flags:
#   --id - Distro ID
#   --version - Distro version
#   --branch - Distro branch
#   --codename - Distro codename
#   --name - Distro name
#   --pretty-name - Distro pretty name
# Returns:
#   String
#########################
get_os_metadata() {
    local -r flag_name="${1:?missing flag}"
    # Helper function
    get_os_release_metadata() {
        local -r env_name="${1:?missing environment variable name}"
        (
            . /etc/os-release
            echo "${!env_name}"
        )
    }
    case "$flag_name" in
        --id)
            get_os_release_metadata ID
            ;;
        --version)
            get_os_release_metadata VERSION_ID
            ;;
        --branch)
            get_os_release_metadata VERSION_ID | sed 's/\..*//'
            ;;
        --codename)
            get_os_release_metadata VERSION_CODENAME
            ;;
        --name)
            get_os_release_metadata NAME
            ;;
        --pretty-name)
            get_os_release_metadata PRETTY_NAME
            ;;
        *)
            error "Unknown flag ${flag_name}"
            return 1
            ;;
    esac
}
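
# Usage sketch: query a couple of distro facts, assuming this library has been
# sourced on a host that provides /etc/os-release.
example_print_os_info() {
    local distro version
    distro="$(get_os_metadata --id)"        # e.g. "debian"
    version="$(get_os_metadata --version)"  # e.g. "12"
    echo "Running on ${distro} ${version}"
}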

########################
# Get total memory available
# Arguments:
#   None
# Returns:
#   Memory in megabytes
#########################
get_total_memory() {
    echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024))
}

########################
# Get machine size depending on specified memory
# Globals:
#   None
# Arguments:
#   None
# Flags:
#   --memory - memory size (optional)
# Returns:
#   Detected instance size
#########################
get_machine_size() {
    local memory=""
    # Validate arguments
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            --memory)
                shift
                memory="${1:?missing memory}"
                ;;
            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done
    if [[ -z "$memory" ]]; then
        debug "Memory was not specified, detecting available memory automatically"
        memory="$(get_total_memory)"
    fi
    sanitized_memory=$(convert_to_mb "$memory")
    if [[ "$sanitized_memory" -gt 26000 ]]; then
        echo 2xlarge
    elif [[ "$sanitized_memory" -gt 13000 ]]; then
        echo xlarge
    elif [[ "$sanitized_memory" -gt 6000 ]]; then
        echo large
    elif [[ "$sanitized_memory" -gt 3000 ]]; then
        echo medium
    elif [[ "$sanitized_memory" -gt 1500 ]]; then
        echo small
    else
        echo micro
    fi
}

########################
# List the supported machine sizes
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Supported machine sizes
#########################
get_supported_machine_sizes() {
    echo micro small medium large xlarge 2xlarge
}

########################
# Convert memory size from string to amount of megabytes (i.e. 2G -> 2048)
# Globals:
#   None
# Arguments:
#   $1 - memory size
# Returns:
#   Result of the conversion
#########################
convert_to_mb() {
    local amount="${1:-}"
    if [[ $amount =~ ^([0-9]+)(m|M|g|G) ]]; then
        size="${BASH_REMATCH[1]}"
        unit="${BASH_REMATCH[2]}"
        if [[ "$unit" = "g" || "$unit" = "G" ]]; then
            amount="$((size * 1024))"
        else
            amount="$size"
        fi
    fi
    echo "$amount"
}
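
# Usage sketch: convert_to_mb accepts values such as "2G" or "512m" and returns
# megabytes, while get_machine_size maps memory to a size bucket. Assumes this
# library has been sourced; the memory figures are placeholders.
example_size_machine() {
    echo "4G is $(convert_to_mb "4G") MB"              # prints 4096
    echo "Detected size: $(get_machine_size)"          # e.g. "medium" on a 4 GB host
    echo "Forced size: $(get_machine_size --memory "2G")"
}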

#########################
# Redirects output to /dev/null if debug mode is disabled
# Globals:
#   BITNAMI_DEBUG
# Arguments:
#   $@ - Command to execute
# Returns:
#   None
#########################
debug_execute() {
    if is_boolean_yes "${BITNAMI_DEBUG:-false}"; then
        "$@"
    else
        "$@" >/dev/null 2>&1
    fi
}

########################
# Retries a command a given number of times
# Arguments:
#   $1 - cmd (as a string)
#   $2 - max retries. Default: 12
#   $3 - sleep between retries (in seconds). Default: 5
# Returns:
#   Boolean
#########################
retry_while() {
    local cmd="${1:?cmd is missing}"
    local retries="${2:-12}"
    local sleep_time="${3:-5}"
    local return_value=1

    read -r -a command <<<"$cmd"
    for ((i = 1; i <= retries; i += 1)); do
        "${command[@]}" && return_value=0 && break
        sleep "$sleep_time"
    done
    return $return_value
}
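
# Usage sketch: retry_while takes the command as a single string and re-runs it
# until it succeeds or the retries are exhausted; debug_execute silences output
# unless BITNAMI_DEBUG is enabled. Assumes this library has been sourced; the
# host, port and retry values are placeholders.
example_wait_for_port() {
    if retry_while "debug_execute nc -z 127.0.0.1 9092" 24 5; then
        info "Port is open"
    else
        error "Timed out waiting for the port"
        return 1
    fi
}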

########################
# Generate a random string
# Flags:
#   -t|--type - String type (ascii, alphanumeric, numeric), defaults to ascii
#   -c|--count - Number of characters, defaults to 32
# Returns:
#   String
#########################
generate_random_string() {
    local type="ascii"
    local count="32"
    local filter
    local result
    # Validate arguments
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            -t | --type)
                shift
                type="$1"
                ;;
            -c | --count)
                shift
                count="$1"
                ;;
            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done
    # Validate type
    case "$type" in
        ascii)
            filter="[:print:]"
            ;;
        numeric)
            filter="0-9"
            ;;
        alphanumeric)
            filter="a-zA-Z0-9"
            ;;
        alphanumeric+special|special+alphanumeric)
            # Limit variety of special characters, so there is a higher chance of containing more alphanumeric characters
            # Special characters are harder to write, and it could impact the overall UX if most passwords are too complex
            filter='a-zA-Z0-9:@.,/+!='
            ;;
        *)
            echo "Invalid type ${type}" >&2
            return 1
            ;;
    esac
    # Obtain count + 10 lines from /dev/urandom to ensure that the resulting string has the expected size
    # Note there is a very small chance of strings starting with EOL character
    # Therefore, the more lines that are read, the less frequently this will happen
    result="$(head -n "$((count + 10))" /dev/urandom | tr -dc "$filter" | head -c "$count")"
    echo "$result"
}
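
# Usage sketch: generate credentials of different shapes, assuming this library
# has been sourced. The lengths are placeholders.
example_generate_secrets() {
    local password token
    password="$(generate_random_string -t alphanumeric -c 16)"
    token="$(generate_random_string --type alphanumeric+special --count 48)"
    echo "password length: ${#password}, token length: ${#token}"
}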

########################
# Create md5 hash from a string
# Arguments:
#   $1 - string
# Returns:
#   md5 hash - string
#########################
generate_md5_hash() {
    local -r str="${1:?missing input string}"
    echo -n "$str" | md5sum | awk '{print $1}'
}

########################
# Create sha hash from a string
# Arguments:
#   $1 - string
#   $2 - algorithm - 1 (default), 224, 256, 384, 512
# Returns:
#   sha hash - string
#########################
generate_sha_hash() {
    local -r str="${1:?missing input string}"
    local -r algorithm="${2:-1}"
    echo -n "$str" | "sha${algorithm}sum" | awk '{print $1}'
}
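
# Usage sketch: hash the same input with md5, sha-1 (the default) and sha-256.
# Assumes this library has been sourced and the md5sum/sha*sum tools are available.
example_hash_string() {
    local -r input="hello"
    echo "md5:    $(generate_md5_hash "$input")"
    echo "sha1:   $(generate_sha_hash "$input")"
    echo "sha256: $(generate_sha_hash "$input" 256)"
}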

########################
# Converts a string to its hexadecimal representation
# Arguments:
#   $1 - string
# Returns:
#   hexadecimal representation of the string
#########################
convert_to_hex() {
    local -r str=${1:?missing input string}
    local -i iterator
    local char
    for ((iterator = 0; iterator < ${#str}; iterator++)); do
        char=${str:iterator:1}
        printf '%x' "'${char}"
    done
}

########################
# Get boot time
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Boot time metadata
#########################
get_boot_time() {
    stat /proc --format=%Y
}

########################
# Get machine ID
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Machine ID
#########################
get_machine_id() {
    local machine_id
    if [[ -f /etc/machine-id ]]; then
        machine_id="$(cat /etc/machine-id)"
    fi
    if [[ -z "$machine_id" ]]; then
        # Fallback to the boot-time, which will at least ensure a unique ID in the current session
        machine_id="$(get_boot_time)"
    fi
    echo "$machine_id"
}

########################
# Get the root partition's disk device ID (e.g. /dev/sda1)
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Root partition disk ID
#########################
get_disk_device_id() {
    local device_id=""
    if grep -q ^/dev /proc/mounts; then
        device_id="$(grep ^/dev /proc/mounts | awk '$2 == "/" { print $1 }' | tail -1)"
    fi
    # If it could not be autodetected, fallback to /dev/sda1 as a default
    if [[ -z "$device_id" || ! -b "$device_id" ]]; then
        device_id="/dev/sda1"
    fi
    echo "$device_id"
}

########################
# Get the root disk device ID (e.g. /dev/sda)
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Root disk ID
#########################
get_root_disk_device_id() {
    get_disk_device_id | sed -E 's/p?[0-9]+$//'
}

########################
# Get the root disk size in bytes
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Root disk size in bytes
#########################
get_root_disk_size() {
    fdisk -l "$(get_root_disk_device_id)" | grep 'Disk.*bytes' | sed -E 's/.*, ([0-9]+) bytes,.*/\1/' || true
}

########################
# Run command as a specific user and group (optional)
# Arguments:
#   $1 - USER(:GROUP) to switch to
#   $2..$n - command to execute
# Returns:
#   Exit code of the specified command
#########################
run_as_user() {
    run_chroot "$@"
}

########################
# Execute command as a specific user and group (optional),
# replacing the current process image
# Arguments:
#   $1 - USER(:GROUP) to switch to
#   $2..$n - command to execute
# Returns:
#   Exit code of the specified command
#########################
exec_as_user() {
    run_chroot --replace-process "$@"
}

########################
# Run a command using chroot
# Arguments:
#   $1 - USER(:GROUP) to switch to
#   $2..$n - command to execute
# Flags:
#   -r | --replace-process - Replace the current process image (optional)
# Returns:
#   Exit code of the specified command
#########################
run_chroot() {
    local userspec
    local user
    local homedir
    local replace=false
    local -r cwd="$(pwd)"

    # Parse and validate flags
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            -r | --replace-process)
                replace=true
                ;;
            --)
                shift
                break
                ;;
            -*)
                stderr_print "unrecognized flag $1"
                return 1
                ;;
            *)
                break
                ;;
        esac
        shift
    done

    # Parse and validate arguments
    if [[ "$#" -lt 2 ]]; then
        echo "expected at least 2 arguments"
        return 1
    else
        userspec=$1
        shift

        # userspec can optionally include the group, so we parse the user
        user=$(echo "$userspec" | cut -d':' -f1)
    fi

    if ! am_i_root; then
        error "Could not switch to '${userspec}': Operation not permitted"
        return 1
    fi

    # Get the HOME directory for the user to switch, as chroot does
    # not properly update this env and some scripts rely on it
    homedir=$(eval echo "~${user}")
    if [[ ! -d $homedir ]]; then
        homedir="${HOME:-/}"
    fi

    # Obtaining value for "$@" indirectly in order to properly support shell parameter expansion
    if [[ "$replace" = true ]]; then
        exec chroot --userspec="$userspec" / bash -c "cd ${cwd}; export HOME=${homedir}; exec \"\$@\"" -- "$@"
    else
        chroot --userspec="$userspec" / bash -c "cd ${cwd}; export HOME=${homedir}; exec \"\$@\"" -- "$@"
    fi
}
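
# Usage sketch: run one command as another user, then replace the current
# process for a long-running service. Assumes this library has been sourced,
# the caller is root, and the "daemon" user and script path are placeholders.
example_drop_privileges() {
    run_as_user "daemon:daemon" id -un               # prints "daemon"
    exec_as_user "daemon" /opt/bitnami/scripts/start.sh
}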
@ -1,124 +0,0 @@
#!/bin/bash
# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0
#
# Bitnami persistence library
# Used for bringing persistence capabilities to applications that don't have clear separation of data and logic

# shellcheck disable=SC1091

# Load Generic Libraries
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libversion.sh

# Functions

########################
# Persist an application directory
# Globals:
#   BITNAMI_ROOT_DIR
#   BITNAMI_VOLUME_DIR
# Arguments:
#   $1 - App folder name
#   $2 - List of app files to persist
# Returns:
#   true if all steps succeeded, false otherwise
#########################
persist_app() {
    local -r app="${1:?missing app}"
    local -a files_to_persist
    read -r -a files_to_persist <<< "$(tr ',;:' ' ' <<< "$2")"
    local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
    local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
    # Persist the individual files
    if [[ "${#files_to_persist[@]}" -le 0 ]]; then
        warn "No files are configured to be persisted"
        return
    fi
    pushd "$install_dir" >/dev/null || exit
    local file_to_persist_relative file_to_persist_destination file_to_persist_destination_folder
    local -r tmp_file="/tmp/perms.acl"
    for file_to_persist in "${files_to_persist[@]}"; do
        if [[ ! -f "$file_to_persist" && ! -d "$file_to_persist" ]]; then
            error "Cannot persist '${file_to_persist}' because it does not exist"
            return 1
        fi
        file_to_persist_relative="$(relativize "$file_to_persist" "$install_dir")"
        file_to_persist_destination="${persist_dir}/${file_to_persist_relative}"
        file_to_persist_destination_folder="$(dirname "$file_to_persist_destination")"
        # Get original permissions for existing files, which will be applied later
        # Exclude the root directory with 'sed', to avoid issues when copying the entirety of it to a volume
        getfacl -R "$file_to_persist_relative" | sed -E '/# file: (\..+|[^.])/,$!d' > "$tmp_file"
        # Copy directories to the volume
        ensure_dir_exists "$file_to_persist_destination_folder"
        cp -Lr --preserve=links "$file_to_persist_relative" "$file_to_persist_destination_folder"
        # Restore permissions
        pushd "$persist_dir" >/dev/null || exit
        if am_i_root; then
            setfacl --restore="$tmp_file"
        else
            # When running as non-root, don't change ownership
            setfacl --restore=<(grep -E -v '^# (owner|group):' "$tmp_file")
        fi
        popd >/dev/null || exit
    done
    popd >/dev/null || exit
    rm -f "$tmp_file"
    # Install the persisted files into the installation directory, via symlinks
    restore_persisted_app "$@"
}

########################
# Restore a persisted application directory
# Globals:
#   BITNAMI_ROOT_DIR
#   BITNAMI_VOLUME_DIR
#   FORCE_MAJOR_UPGRADE
# Arguments:
#   $1 - App folder name
#   $2 - List of app files to restore
# Returns:
#   true if all steps succeeded, false otherwise
#########################
restore_persisted_app() {
    local -r app="${1:?missing app}"
    local -a files_to_restore
    read -r -a files_to_restore <<< "$(tr ',;:' ' ' <<< "$2")"
    local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
    local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
    # Restore the individual persisted files
    if [[ "${#files_to_restore[@]}" -le 0 ]]; then
        warn "No persisted files are configured to be restored"
        return
    fi
    local file_to_restore_relative file_to_restore_origin file_to_restore_destination
    for file_to_restore in "${files_to_restore[@]}"; do
        file_to_restore_relative="$(relativize "$file_to_restore" "$install_dir")"
        # We use 'realpath --no-symlinks' to ensure that the case of '.' is covered and the directory is removed
        file_to_restore_origin="$(realpath --no-symlinks "${install_dir}/${file_to_restore_relative}")"
        file_to_restore_destination="$(realpath --no-symlinks "${persist_dir}/${file_to_restore_relative}")"
        rm -rf "$file_to_restore_origin"
        ln -sfn "$file_to_restore_destination" "$file_to_restore_origin"
    done
}

########################
# Check if an application directory was already persisted
# Globals:
#   BITNAMI_VOLUME_DIR
# Arguments:
#   $1 - App folder name
# Returns:
#   true if the application was already persisted, false otherwise
#########################
is_app_initialized() {
    local -r app="${1:?missing app}"
    local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
    if ! is_mounted_dir_empty "$persist_dir"; then
        true
    else
        false
    fi
}
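
# Usage sketch: persist an application's data and config on first boot and
# reuse the volume on later boots. Assumes this library has been sourced and
# that BITNAMI_ROOT_DIR and BITNAMI_VOLUME_DIR are set; the app name and file
# list are placeholders.
example_initialize_app() {
    local -r app="myapp"
    local -r persisted_files="data,conf/app.properties"
    if is_app_initialized "$app"; then
        info "Restoring persisted ${app} files"
        restore_persisted_app "$app" "$persisted_files"
    else
        info "Persisting ${app} files for the first time"
        persist_app "$app" "$persisted_files"
    fi
}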