mirror of https://github.com/goharbor/harbor.git
Compare commits
145 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 8c3f0c6d4d | |
| | 26f6310fbc | |
| | 95003aa41b | |
| | 8f7e4bb0f1 | |
| | 80daa2dfcd | |
| | 985d5c3f0c | |
| | 71bbd5615a | |
| | e9bc2c3047 | |
| | b0e54cad76 | |
| | eb401aa0e1 | |
| | ce6e76ad77 | |
| | 4103e3f9ed | |
| | bdee4da625 | |
| | 575aed0698 | |
| | 5a0e81e274 | |
| | c305a80616 | |
| | 01f848e7f2 | |
| | 5402ac7233 | |
| | c878bbf98f | |
| | 4d3c0232b5 | |
| | 6e15beb160 | |
| | e7066476a3 | |
| | d0934b2087 | |
| | c004f2d3e6 | |
| | 4da6070872 | |
| | 1a7eb31a5f | |
| | 47586cd5b6 | |
| | 9cc824fa5f | |
| | 81fc9025b0 | |
| | 430a8a7e48 | |
| | 921b0d3a4c | |
| | c588d7932d | |
| | a0cdadba74 | |
| | 0f1edd0200 | |
| | c122380b4d | |
| | a7e990c294 | |
| | 08a6300db5 | |
| | d0ad35e0f2 | |
| | 7781d6484e | |
| | d81beb35b2 | |
| | 1e18d90691 | |
| | 2c6de8783d | |
| | 8262d0a167 | |
| | 19c60ea8a9 | |
| | b1d41ce82b | |
| | 634deb4705 | |
| | 26b61390a0 | |
| | 8f2003c40e | |
| | 299fce5619 | |
| | 46ce0a239e | |
| | dadc33b2bc | |
| | 5a9c1f6b18 | |
| | aeea1a5c64 | |
| | 4729d158eb | |
| | da2e863a6a | |
| | 63b61d6995 | |
| | 747d54d70e | |
| | 7be31c187c | |
| | aaff6fecc4 | |
| | 682eaaaa6b | |
| | e80b940942 | |
| | bd67811dec | |
| | a8336a7199 | |
| | ebaa0c4283 | |
| | 221037378a | |
| | 91d2fa6377 | |
| | 9f809f17ba | |
| | ce86b43105 | |
| | 50c4048ba4 | |
| | cc97ec0b62 | |
| | 6a1abab687 | |
| | 70b03c9483 | |
| | 171d9b4c0e | |
| | 257afebd5f | |
| | f15638c5f3 | |
| | ebc340a8f7 | |
| | de657686b3 | |
| | ea4110c30a | |
| | bb7162f5e6 | |
| | e8c2e478b6 | |
| | 71f2ea84bd | |
| | 8007c2e02e | |
| | 0f67947c87 | |
| | ebdfb547ba | |
| | 440f53ebbc | |
| | c83f2d114f | |
| | 01dba8ad57 | |
| | 19f4958ec3 | |
| | 6c620dc20c | |
| | c93da7ff4b | |
| | 0cf2d7545d | |
| | c0a859d538 | |
| | 2565491758 | |
| | 0a3c06d89c | |
| | 6be2971941 | |
| | 229ef88684 | |
| | 0f8913bb27 | |
| | 0c5d82e9d4 | |
| | b8e3dd8fa0 | |
| | e1e807072c | |
| | 937e5920a2 | |
| | 918aac61a6 | |
| | c0b22d8e24 | |
| | 59c3de10a6 | |
| | b647032747 | |
| | ec9d13d107 | |
| | f46ef3b38d | |
| | 907c6c0900 | |
| | 780a217122 | |
| | 145a10a8b9 | |
| | f46295aadb | |
| | e049fcd985 | |
| | 7dcdec94e2 | |
| | 3dee318a2e | |
| | a546f99974 | |
| | 111fc1c03e | |
| | 6f856cd6b1 | |
| | 2faff8e6af | |
| | 424cdd8828 | |
| | 3df34c5735 | |
| | b4ba918118 | |
| | 85f3f792e4 | |
| | 073dab8a07 | |
| | ada851b49a | |
| | 9e18bbc112 | |
| | 49df3b4362 | |
| | bc8653abc7 | |
| | f684c1c36e | |
| | 70306dca0c | |
| | b3cfe225db | |
| | bef66740ec | |
| | 972965ff5a | |
| | 187f1a9ffb | |
| | ff2f4b0e71 | |
| | 9850f1404d | |
| | ad7be0b42f | |
| | 6772477e8a | |
| | a13a16383a | |
| | b58a60e273 | |
| | 9dcbd56e52 | |
| | f8f1994c9e | |
| | bfc29904f9 | |
| | 259c8a2053 | |
| | 7ad799c7c7 | |
| | d0917e3e66 | |
@@ -8,6 +8,9 @@
* Add date here... Add signature here...
- Add your reason here...

* Aug 12 2025 <yan-yw.wang@broadcom.com>
- Refresh base image

* Oct 24 2024 <yan-yw.wang@broadcom.com>
- Refresh base image
@@ -23,6 +23,16 @@ updates:
    schedule:
      interval: "weekly"
    labels:
      - "release-note/update"
      - "release-note/bump-version"
      - "branch/main"

  - package-ecosystem: "gomod"
    directory: "/src"
    schedule:
      interval: "weekly"
    target-branch: "release-2.14.0"
    labels:
      - "release-note/bump-version"
      - "branch/release-2.14.0"

# More will be needed
@@ -31,6 +31,10 @@ changelog:
      labels:
        - release-note/deprecation

    - title: Bump Component Version 🤖
      labels:
        - release-note/bump-version

    - title: Other Changes
      labels:
        - "*"
@@ -15,6 +15,8 @@ env:
  UI_BUILDER_VERSION: 1.6.0

on:
  # the paths-ignore is the same as the paths in pass-CI.yml, they should be synced together
  # see https://web.archive.org/web/20230506145443/https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/troubleshooting-required-status-checks#handling-skipped-but-required-checks
  pull_request:
    paths-ignore:
      - 'docs/**'

@@ -23,7 +25,11 @@ on:
      - '!tests/**.sh'
      - '!tests/apitests/**'
      - '!tests/ci/**'
      - '!tests/resources/**'
      - '!tests/robot-cases/**'
      - '!tests/robot-cases/Group1-Nightly/**'
  push:
    # the paths-ignore is the same as the paths in pass-CI.yml, they should be synced together
    paths-ignore:
      - 'docs/**'
      - '**.md'

@@ -31,6 +37,9 @@ on:
      - '!tests/**.sh'
      - '!tests/apitests/**'
      - '!tests/ci/**'
      - '!tests/resources/**'
      - '!tests/robot-cases/**'
      - '!tests/robot-cases/Group1-Nightly/**'

jobs:
  UTTEST:

@@ -38,7 +47,7 @@ jobs:
      UTTEST: true
    runs-on:
      #- self-hosted
      - ubuntu-latest
      - oracle-vm-24cpu-96gb-x86-64
    timeout-minutes: 100
    steps:
      - name: Set up Go 1.23

@@ -46,7 +55,7 @@ jobs:
        with:
          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
        with:
          path: src/github.com/goharbor/harbor
      - name: setup env

@@ -91,7 +100,7 @@ jobs:
      - name: Codecov For BackEnd
        uses: codecov/codecov-action@v5
        with:
          file: ./src/github.com/goharbor/harbor/profile.cov
          files: ./src/github.com/goharbor/harbor/profile.cov
          flags: unittests

  APITEST_DB:

@@ -99,7 +108,7 @@ jobs:
      APITEST_DB: true
    runs-on:
      #- self-hosted
      - ubuntu-latest
      - oracle-vm-24cpu-96gb-x86-64
    timeout-minutes: 100
    steps:
      - name: Set up Go 1.23

@@ -107,7 +116,7 @@ jobs:
        with:
          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
        with:
          path: src/github.com/goharbor/harbor
      - name: setup env

@@ -148,13 +157,18 @@ jobs:
          df -h
          bash ./tests/showtime.sh ./tests/ci/api_run.sh DB $IP
          df -h

      - name: upload_logs
        uses: actions/upload-artifact@v5
        with:
          name: db-api-harbor-logs.tar.gz
          path: /home/runner/work/harbor/harbor/src/github.com/goharbor/harbor/integration_logs.tar.gz
          retention-days: 5
  APITEST_DB_PROXY_CACHE:
    env:
      APITEST_DB: true
    runs-on:
      #- self-hosted
      - ubuntu-latest
      - oracle-vm-24cpu-96gb-x86-64
    timeout-minutes: 100
    steps:
      - name: Set up Go 1.23

@@ -162,7 +176,7 @@ jobs:
        with:
          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
        with:
          path: src/github.com/goharbor/harbor
      - name: setup env

@@ -203,13 +217,18 @@ jobs:
          df -h
          bash ./tests/showtime.sh ./tests/ci/api_run.sh PROXY_CACHE $IP
          df -h

      - name: upload_logs
        uses: actions/upload-artifact@v5
        with:
          name: proxy-api-harbor-logs.tar.gz
          path: /home/runner/work/harbor/harbor/src/github.com/goharbor/harbor/integration_logs.tar.gz
          retention-days: 5
  APITEST_LDAP:
    env:
      APITEST_LDAP: true
    runs-on:
      #- self-hosted
      - ubuntu-latest
      - oracle-vm-24cpu-96gb-x86-64
    timeout-minutes: 100
    steps:
      - name: Set up Go 1.23

@@ -217,7 +236,7 @@ jobs:
        with:
          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
        with:
          path: src/github.com/goharbor/harbor
      - name: setup env

@@ -256,13 +275,18 @@ jobs:
          cd src/github.com/goharbor/harbor
          bash ./tests/showtime.sh ./tests/ci/api_run.sh LDAP $IP
          df -h

      - name: upload_logs
        uses: actions/upload-artifact@v5
        with:
          name: ldap-api-harbor-logs.tar.gz
          path: /home/runner/work/harbor/harbor/src/github.com/goharbor/harbor/integration_logs.tar.gz
          retention-days: 5
  OFFLINE:
    env:
      OFFLINE: true
    runs-on:
      #- self-hosted
      - ubuntu-latest
      - oracle-vm-24cpu-96gb-x86-64
    timeout-minutes: 100
    steps:
      - name: Set up Go 1.23

@@ -270,7 +294,7 @@ jobs:
        with:
          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
        with:
          path: src/github.com/goharbor/harbor
      - name: setup env

@@ -314,13 +338,13 @@ jobs:
      UI_UT: true
    runs-on:
      #- self-hosted
      - ubuntu-latest
      - oracle-vm-24cpu-96gb-x86-64
    timeout-minutes: 100
    steps:
      - uses: actions/setup-node@v4
      - uses: actions/setup-node@v5
        with:
          node-version: '18'
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
        with:
          path: src/github.com/goharbor/harbor
      - name: script

@@ -333,5 +357,5 @@ jobs:
      - name: Codecov For UI
        uses: codecov/codecov-action@v5
        with:
          file: ./src/github.com/goharbor/harbor/src/portal/coverage/lcov.info
          files: ./src/github.com/goharbor/harbor/src/portal/coverage/lcov.info
          flags: unittests
@@ -13,16 +13,14 @@ jobs:
    env:
      BUILD_PACKAGE: true
    runs-on:
      - ubuntu-20.04
      - oracle-vm-24cpu-96gb-x86-64
    steps:
      - uses: actions/checkout@v3
      - uses: 'google-github-actions/auth@v2'
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v5.0.0
        with:
          credentials_json: '${{ secrets.GCP_CREDENTIALS }}'
      - uses: google-github-actions/setup-gcloud@v2
        with:
          version: '430.0.0'
      - run: gcloud info
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1
      - name: Set up Go 1.22
        uses: actions/setup-go@v5
        with:

@@ -33,13 +31,13 @@ jobs:
        with:
          docker_version: 20.10
          docker_channel: stable
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
      - uses: jitterbit/get-changed-files@v1
        id: changed-files
        with:
          format: space-delimited
          token: ${{ secrets.GITHUB_TOKEN }}
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
        with:
          path: src/github.com/goharbor/harbor
      - name: Build Base Image

@@ -87,10 +85,10 @@ jobs:
          if [ -z "$BUILD_BASE" ] || [ "$BUILD_BASE" != "true" ]; then
            echo "Do not need to build base images!"
          else
            build_base_params=" BUILD_BASE=true PUSHBASEIMAGE=true REGISTRYUSER=\"${{ secrets.DOCKER_HUB_USERNAME }}\" REGISTRYPASSWORD=\"${{ secrets.DOCKER_HUB_PASSWORD }}\""
            build_base_params=" BUILD_BASE=true PULL_BASE_FROM_DOCKERHUB=true PUSHBASEIMAGE=true REGISTRYUSER=\"${{ secrets.DOCKER_HUB_USERNAME }}\" REGISTRYPASSWORD=\"${{ secrets.DOCKER_HUB_PASSWORD }}\""
          fi
          sudo make package_offline GOBUILDTAGS="include_oss include_gcs" BASEIMAGETAG=${Harbor_Build_Base_Tag} VERSIONTAG=${Harbor_Assets_Version} PKGVERSIONTAG=${Harbor_Package_Version} TRIVYFLAG=true HTTPPROXY= ${build_base_params}
          sudo make package_online GOBUILDTAGS="include_oss include_gcs" BASEIMAGETAG=${Harbor_Build_Base_Tag} VERSIONTAG=${Harbor_Assets_Version} PKGVERSIONTAG=${Harbor_Package_Version} TRIVYFLAG=true HTTPPROXY= ${build_base_params}
          sudo make package_offline GOBUILDTAGS="include_oss include_gcs" BASEIMAGETAG=${Harbor_Build_Base_Tag} VERSIONTAG=${Harbor_Assets_Version} PKGVERSIONTAG=${Harbor_Package_Version} TRIVYFLAG=true EXPORTERFLAG=true HTTPPROXY= ${build_base_params}
          sudo make package_online GOBUILDTAGS="include_oss include_gcs" BASEIMAGETAG=${Harbor_Build_Base_Tag} VERSIONTAG=${Harbor_Assets_Version} PKGVERSIONTAG=${Harbor_Package_Version} TRIVYFLAG=true EXPORTERFLAG=true HTTPPROXY= ${build_base_params}
          harbor_offline_build_bundle=$(basename harbor-offline-installer-*.tgz)
          harbor_online_build_bundle=$(basename harbor-online-installer-*.tgz)
          echo "Package name is: $harbor_offline_build_bundle"
@@ -13,7 +13,7 @@ jobs:

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
        uses: actions/checkout@v5
        with:
          # We must fetch at least the immediate parents so that if this is
          # a pull request then we can checkout the head.
@@ -15,22 +15,20 @@ jobs:
      CONFORMANCE_TEST: true
    runs-on:
      #- self-hosted
      - ubuntu-latest
      - oracle-vm-24cpu-96gb-x86-64
    steps:
      - uses: actions/checkout@v3
      - id: 'auth'
        name: 'Authenticate to Google Cloud'
        uses: google-github-actions/auth@v2
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v5.0.0
        with:
          credentials_json: '${{ secrets.GCP_CREDENTIALS }}'
      - uses: google-github-actions/setup-gcloud@v2
      - run: gcloud info
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1
      - name: Set up Go 1.21
        uses: actions/setup-go@v5
        with:
          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
        with:
          path: src/github.com/goharbor/harbor
      - name: before_install

@@ -65,6 +63,5 @@ jobs:
      - name: upload test result to gs
        run: |
          cd src/github.com/goharbor/harbor
          gsutil cp ./distribution-spec/conformance/report.html gs://harbor-conformance-test/report.html
          gsutil acl ch -u AllUsers:R gs://harbor-conformance-test/report.html
          aws s3 cp ./distribution-spec/conformance/report.html s3://harbor-conformance-test/report.html
        if: always()
@@ -7,7 +7,7 @@ jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v9.1.0
      - uses: actions/stale@v10.0.0
        with:
          stale-issue-message: 'This issue is being marked stale due to a period of inactivity. If this issue is still relevant, please comment or remove the stale label. Otherwise, this issue will close in 30 days.'
          stale-pr-message: 'This PR is being marked stale due to a period of inactivty. If this PR is still relevant, please comment or remove the stale label. Otherwise, this PR will close in 30 days.'
@@ -21,7 +21,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        uses: actions/checkout@v5
      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@master
        with:
@@ -2,6 +2,7 @@ name: CI

on:
  pull_request:
    # the paths is the same as the paths-ignore in CI.yml, they should be synced together
    paths:
      - 'docs/**'
      - '**.md'

@@ -9,7 +10,11 @@ on:
      - '!tests/**.sh'
      - '!tests/apitests/**'
      - '!tests/ci/**'
      - '!tests/resources/**'
      - '!tests/robot-cases/**'
      - '!tests/robot-cases/Group1-Nightly/**'
  push:
    # the paths is the same as the paths-ignore in CI.yml, they should be synced together
    paths:
      - 'docs/**'
      - '**.md'

@@ -17,6 +22,9 @@ on:
      - '!tests/**.sh'
      - '!tests/apitests/**'
      - '!tests/ci/**'
      - '!tests/resources/**'
      - '!tests/robot-cases/**'
      - '!tests/robot-cases/Group1-Nightly/**'

jobs:
  UTTEST:
@@ -7,9 +7,9 @@ on:

jobs:
  release:
    runs-on: ubuntu-20.04
    runs-on: oracle-vm-24cpu-96gb-x86-64
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v5
      - name: Setup env
        run: |
          echo "CUR_TAG=${{ github.ref_name }}" >> $GITHUB_ENV

@@ -19,12 +19,12 @@ jobs:
          echo "PRE_TAG=$(echo $release | jq -r '.body' | jq -r '.preTag')" >> $GITHUB_ENV
          echo "BRANCH=$(echo $release | jq -r '.target_commitish')" >> $GITHUB_ENV
          echo "PRERELEASE=$(echo $release | jq -r '.prerelease')" >> $GITHUB_ENV
      - uses: 'google-github-actions/auth@v2'
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v5.0.0
        with:
          credentials_json: '${{ secrets.GCP_CREDENTIALS }}'
      - uses: google-github-actions/setup-gcloud@v2
        with:
          version: '430.0.0'
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1
      - name: Prepare Assets
        run: |
          if [ ! ${{ env.BUILD_NO }} -o ${{ env.BUILD_NO }} = "null" ]

@@ -39,8 +39,8 @@ jobs:
          src_online_package=harbor-online-installer-${{ env.BASE_TAG }}-${{ env.BUILD_NO }}.tgz
          dst_offline_package=harbor-offline-installer-${{ env.CUR_TAG }}.tgz
          dst_online_package=harbor-online-installer-${{ env.CUR_TAG }}.tgz
          gsutil cp gs://${{ secrets.HARBOR_RELEASE_BUILD }}/${{ env.BRANCH }}/${src_offline_package} gs://${{ secrets.HARBOR_RELEASE_BUILD }}/${{ env.BRANCH }}/${dst_offline_package}
          gsutil cp gs://${{ secrets.HARBOR_RELEASE_BUILD }}/${{ env.BRANCH }}/${src_online_package} gs://${{ secrets.HARBOR_RELEASE_BUILD }}/${{ env.BRANCH }}/${dst_online_package}
          aws s3 cp s3://${{ secrets.HARBOR_RELEASE_BUILD }}/${{ env.BRANCH }}/${src_offline_package} s3://${{ secrets.HARBOR_RELEASE_BUILD }}/${{ env.BRANCH }}/${dst_offline_package}
          aws s3 cp s3://${{ secrets.HARBOR_RELEASE_BUILD }}/${{ env.BRANCH }}/${src_online_package} s3://${{ secrets.HARBOR_RELEASE_BUILD }}/${{ env.BRANCH }}/${dst_online_package}

          assets_path=$(pwd)/assets
          source tools/release/release_utils.sh && getAssets ${{ secrets.HARBOR_RELEASE_BUILD }} ${{ env.BRANCH }} $dst_offline_package $dst_online_package ${{ env.PRERELEASE }} $assets_path
CHANGELOG.md: 18 changes
@@ -31,10 +31,10 @@ API explorer integration. End users can now explore and trigger Harbor’s API v
* Support Image Retag, enables the user to tag image to different repositories and projects, this is particularly useful in cases when images need to be retagged programmatically in a CI pipeline.
* Support Image Build History, makes it easy to see the contents of a container image, refer to the [User Guide](https://github.com/goharbor/harbor/blob/release-1.7.0/docs/user_guide.md#build-history).
* Support Logger customization, enables the user to customize STDOUT / STDERR / FILE / DB logger of running jobs.
* Improve user experience of Helm Chart Repository:
  - Chart searching included in the global search results
  - Show chart versions total number in the chart list
  - Mark labels to helm charts
* Improve the user experience of Helm Chart Repository:
  - Chart searching is included in the global search results
  - Show the total number of chart versions in the chart list
  - Mark labels in helm charts
  - The latest version can be downloaded as default one on the chart list view
  - The chart can be deleted by deleting all the versions under it

@@ -58,7 +58,7 @@ API explorer integration. End users can now explore and trigger Harbor’s API v
- Replication policy rework to support wildcard, scheduled replication.
- Support repository level description.
- Batch operation on projects/repositories/users from UI.
- On board LDAP user when adding member to a project.
- On board LDAP user when adding a member to a project.

## v1.3.0 (2018-01-04)

@@ -75,11 +75,11 @@ API explorer integration. End users can now explore and trigger Harbor’s API v
## v1.1.0 (2017-04-18)

- Add in Notary support
- User can update configuration through Harbor UI
- User can update the configuration through Harbor UI
- Redesign of Harbor's UI using Clarity
- Some changes to API
- Fix some security issues in token service
- Upgrade base image of nginx for latest openssl version
- Fix some security issues in the token service
- Upgrade the base image of nginx to the latest openssl version
- Various bug fixes.

## v0.5.0 (2016-12-6)

@@ -88,7 +88,7 @@ API explorer integration. End users can now explore and trigger Harbor’s API v
- Easier configuration for HTTPS in prepare script
- Script to collect logs of a Harbor deployment
- User can view the storage usage (default location) of Harbor.
- Add an attribute to disable normal user to create project
- Add an attribute to disable normal users from creating projects.
- Various bug fixes.

For Harbor virtual appliance:
@@ -14,7 +14,7 @@ Contributors are encouraged to collaborate using the following resources in addi
* Chat with us on the CNCF Slack ([get an invitation here][cncf-slack] )
  * [#harbor][users-slack] for end-user discussions
  * [#harbor-dev][dev-slack] for development of Harbor
* Want long-form communication instead of Slack? We have two distributions lists:
* Want long-form communication instead of Slack? We have two distribution lists:
  * [harbor-users][users-dl] for end-user discussions
  * [harbor-dev][dev-dl] for development of Harbor

@@ -49,7 +49,7 @@ To build the project, please refer the [build](https://goharbor.io/docs/edge/bui

### Repository Structure

Here is the basic structure of the harbor code base. Some key folders / files are commented for your references.
Here is the basic structure of the Harbor code base. Some key folders / files are commented for your reference.
```
.
...
```

@@ -168,13 +168,14 @@ Harbor backend is written in [Go](http://golang.org/). If you don't have a Harbo
| 2.11 | 1.22.3 |
| 2.12 | 1.23.2 |
| 2.13 | 1.23.8 |
| 2.14 | 1.24.6 |

Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.

#### Web

Harbor web UI is built based on [Clarity](https://vmware.github.io/clarity/) and [Angular](https://angular.io/) web framework. To setup web UI development environment, please make sure the [npm](https://www.npmjs.com/get-npm) tool is installed first.
Harbor web UI is built based on [Clarity](https://vmware.github.io/clarity/) and [Angular](https://angular.io/) web framework. To setup a web UI development environment, please make sure that the [npm](https://www.npmjs.com/get-npm) tool is installed first.

| Harbor | Requires Angular | Requires Clarity |
|----------|--------------------|--------------------|

@@ -204,7 +205,7 @@ PR are always welcome, even if they only contain small fixes like typos or a few

Please submit a PR broken down into small changes bit by bit. A PR consisting of a lot of features and code changes may be hard to review. It is recommended to submit PRs in an incremental fashion.

Note: If you split your pull request to small changes, please make sure any of the changes goes to `main` will not break anything. Otherwise, it can not be merged until this feature complete.
Note: If you split your pull request to small changes, please make sure any of the changes goes to `main` will not break anything. Otherwise, it can not be merged until this feature completed.

### Fork and clone
@@ -278,7 +279,7 @@ To build the code, please refer to [build](https://goharbor.io/docs/edge/build-c

**Note**: from v2.0, Harbor uses [go-swagger](https://github.com/go-swagger/go-swagger) to generate API server from Swagger 2.0 (aka [OpenAPI 2.0](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md)). To add or change the APIs, first update the `api/v2.0/swagger.yaml` file, then run `make gen_apis` to generate the API server, finally, implement or update the API handlers in `src/server/v2.0/handler` package.
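The API change workflow that this note describes, condensed into shell steps (a sketch assembled only from the commands and paths named above; the handler file name is illustrative):

```sh
# 1. describe the new or changed endpoint in the Swagger spec
$EDITOR api/v2.0/swagger.yaml
# 2. regenerate the API server from the spec
make gen_apis
# 3. implement or update the matching handler in the handler package
$EDITOR src/server/v2.0/handler/my_handler.go   # file name is hypothetical
```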
As now Harbor uses `controller/manager/dao` programming model, we suggest to use [testify mock](https://github.com/stretchr/testify/blob/master/mock/doc.go) to test `controller` and `manager`. Harbor integrates [mockery](https://github.com/vektra/mockery) to generate mocks for golang interfaces using the testify mock package. To generate mocks for the interface, first add mock config in the `src/.mockery.yaml`, then run `make gen_mocks` to generate mocks.
As Harbor now uses `controller/manager/dao` programming model, we suggest using [testify mock](https://github.com/stretchr/testify/blob/master/mock/doc.go) to test `controller` and `manager`. Harbor integrates [mockery](https://github.com/vektra/mockery) to generate mocks for golang interfaces using the testify mock package. To generate mocks for the interface, first add mock config in the `src/.mockery.yaml`, then run `make gen_mocks` to generate mocks.
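The mock-generation steps from the sentence above, spelled out as shell commands (a sketch; the concrete `.mockery.yaml` entry depends on the interface being mocked):

```sh
# add a config entry for the interface to mock
$EDITOR src/.mockery.yaml
# regenerate all mocks through the pinned mockery container image
make gen_mocks
```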

### Keep sync with upstream
@@ -317,15 +318,15 @@ curl https://cdn.jsdelivr.net/gh/tommarshall/git-good-commit@v0.6.1/hook.sh > .g

### Automated Testing
Once your pull request has been opened, harbor will run two CI pipelines against it.
Once your pull request has been opened, Harbor will run two CI pipelines against it.
1. In the travis CI, your source code will be checked via `golint`, `go vet` and `go race` that makes sure the code is readable, safe and correct. Also, all of unit tests will be triggered via `go test` against the pull request. What you need to pay attention to is the travis result and the coverage report.
   * If any failure in travis, you need to figure out whether it is introduced by your commits.
   * If the coverage dramatic decline, you need to commit unit test to coverage your code.
2. In the drone CI, the E2E test will be triggered against the pull request. Also, the source code will be checked via `gosec`, and the result is stored in google storage for later analysis. The pipeline is about to build and install harbor from source code, then to run four very basic E2E tests to validate the basic functionalities of harbor, like:
   * Registry Basic Verification, to validate the image can be pulled and pushed successful.
   * Trivy Basic Verification, to validate the image can be scanned successful.
   * Notary Basic Verification, to validate the image can be signed successful.
   * Ldap Basic Verification, to validate harbor can work in LDAP environment.
   * If the coverage dramatically declines, then you need to commit a unit test to cover your code.
2. In the drone CI, the E2E test will be triggered against the pull request. Also, the source code will be checked via `gosec`, and the result is stored in google storage for later analysis. The pipeline is about to build and install harbor from source code, then to run four very basic E2E tests to validate the basic functionalities of Harbor, like:
   * Registry Basic Verification, to validate that the image can be pulled and pushed successfully.
   * Trivy Basic Verification, to validate that the image can be scanned successfully.
   * Notary Basic Verification, to validate that the image can be signed successfully.
   * Ldap Basic Verification, to validate that Harbor can work in LDAP environment.

### Push and Create PR
When ready for review, push your branch to your fork repository on `github.com`:

@@ -344,7 +345,7 @@ Commit changes made in response to review comments to the same branch on your fo

It is a great way to contribute to Harbor by reporting an issue. Well-written and complete bug reports are always welcome! Please open an issue on GitHub and follow the template to fill in required information.

Before opening any issue, please look up the existing [issues](https://github.com/goharbor/harbor/issues) to avoid submitting a duplication.
Before opening any issue, please look up the existing [issues](https://github.com/goharbor/harbor/issues) to avoid submitting a duplicate.
If you find a match, you can "subscribe" to it to get notified on updates. If you have additional helpful information about the issue, please leave a comment.

When reporting issues, always include:
Makefile: 67 changes
@@ -78,6 +78,7 @@ REGISTRYSERVER=
REGISTRYPROJECTNAME=goharbor
DEVFLAG=true
TRIVYFLAG=false
EXPORTERFLAG=false
HTTPPROXY=
BUILDREG=true
BUILDTRIVYADP=true

@@ -92,7 +93,12 @@ VERSIONTAG=dev
BUILD_BASE=true
PUSHBASEIMAGE=false
BASEIMAGETAG=dev
BUILDBASETARGET=trivy-adapter core db jobservice log nginx portal prepare redis registry registryctl exporter
# for skip build prepare and log container while BUILD_INSTALLER=false
BUILD_INSTALLER=true
BUILDBASETARGET=trivy-adapter core db jobservice nginx portal redis registry registryctl exporter
ifeq ($(BUILD_INSTALLER), true)
BUILDBASETARGET += prepare log
endif
IMAGENAMESPACE=goharbor
BASEIMAGENAMESPACE=goharbor
# #input true/false only

@@ -105,8 +111,8 @@ PREPARE_VERSION_NAME=versions

#versions
REGISTRYVERSION=v2.8.3-patch-redis
TRIVYVERSION=v0.61.0
TRIVYADAPTERVERSION=v0.33.0-rc.2
TRIVYVERSION=v0.65.0
TRIVYADAPTERVERSION=v0.34.0-rc.1
NODEBUILDIMAGE=node:16.18.0

# version of registry for pulling the source code
@@ -129,11 +135,12 @@ endef
# docker parameters
DOCKERCMD=$(shell which docker)
DOCKERBUILD=$(DOCKERCMD) build
DOCKERNETWORK=default
DOCKERRMIMAGE=$(DOCKERCMD) rmi
DOCKERPULL=$(DOCKERCMD) pull
DOCKERIMAGES=$(DOCKERCMD) images
DOCKERSAVE=$(DOCKERCMD) save
DOCKERCOMPOSECMD=$(shell which docker-compose)
DOCKERCOMPOSECMD=$(shell which docker-compose 2>/dev/null || echo "docker compose")
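The fallback added to `DOCKERCOMPOSECMD` can be tried on its own; it prints the standalone binary's path when `docker-compose` is installed and otherwise falls back to the plugin form:

```sh
# exactly the shell expression from the new Makefile line above
which docker-compose 2>/dev/null || echo "docker compose"
```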
DOCKERTAG=$(DOCKERCMD) tag

# go parameters
@@ -144,7 +151,7 @@ GOINSTALL=$(GOCMD) install
GOTEST=$(GOCMD) test
GODEP=$(GOTEST) -i
GOFMT=gofmt -w
GOBUILDIMAGE=golang:1.23.8
GOBUILDIMAGE=golang:1.24.6
GOBUILDPATHINCONTAINER=/harbor

# go build
@@ -238,18 +245,27 @@ REGISTRYUSER=
REGISTRYPASSWORD=

# cmds
DOCKERSAVE_PARA=$(DOCKER_IMAGE_NAME_PREPARE):$(VERSIONTAG) \
	$(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
DOCKERSAVE_PARA=$(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
	$(DOCKERIMAGENAME_CORE):$(VERSIONTAG) \
	$(DOCKERIMAGENAME_LOG):$(VERSIONTAG) \
	$(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
	$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) \
	$(DOCKERIMAGENAME_REGCTL):$(VERSIONTAG) \
	$(DOCKERIMAGENAME_EXPORTER):$(VERSIONTAG) \
	$(IMAGENAMESPACE)/redis-photon:$(VERSIONTAG) \
	$(IMAGENAMESPACE)/nginx-photon:$(VERSIONTAG) \
	$(IMAGENAMESPACE)/registry-photon:$(VERSIONTAG)

ifeq ($(BUILD_INSTALLER), true)
DOCKERSAVE_PARA+= $(DOCKER_IMAGE_NAME_PREPARE):$(VERSIONTAG) \
	$(DOCKERIMAGENAME_LOG):$(VERSIONTAG)
endif

ifeq ($(TRIVYFLAG), true)
DOCKERSAVE_PARA+= $(IMAGENAMESPACE)/trivy-adapter-photon:$(VERSIONTAG)
endif
ifeq ($(EXPORTERFLAG), true)
DOCKERSAVE_PARA+= $(DOCKERIMAGENAME_EXPORTER):$(VERSIONTAG)
endif
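A usage sketch of how the new conditionals combine; the flag values mirror the `TRIVYFLAG=true EXPORTERFLAG=true` invocation in the build-package workflow hunk earlier in this compare, and the tag values are illustrative:

```sh
# also save the trivy-adapter and exporter images into the offline bundle;
# with BUILD_INSTALLER=true (the default) the prepare and log images are kept too
sudo make package_offline TRIVYFLAG=true EXPORTERFLAG=true \
    VERSIONTAG=dev PKGVERSIONTAG=dev BASEIMAGETAG=dev
```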

PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(PKGVERSIONTAG).tgz \
	$(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \
	$(HARBORPKG)/prepare \
@@ -266,11 +282,6 @@ PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \

DOCKERCOMPOSE_FILE_OPT=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)

ifeq ($(TRIVYFLAG), true)
DOCKERSAVE_PARA+= $(IMAGENAMESPACE)/trivy-adapter-photon:$(VERSIONTAG)
endif


RUNCONTAINER=$(DOCKERCMD) run --rm -u $(shell id -u):$(shell id -g) -v $(BUILDPATH):$(BUILDPATH) -w $(BUILDPATH)

# $1 the name of the docker image
@@ -308,13 +319,13 @@ define swagger_generate_server
	@$(SWAGGER_GENERATE_SERVER) -f $(1) -A $(3) --target $(2)
endef

gen_apis: lint_apis
gen_apis:
	$(call prepare_docker_image,${SWAGGER_IMAGENAME},${SWAGGER_VERSION},${SWAGGER_IMAGE_BUILD_CMD})
	$(call swagger_generate_server,api/v2.0/swagger.yaml,src/server/v2.0,harbor)


MOCKERY_IMAGENAME=$(IMAGENAMESPACE)/mockery
MOCKERY_VERSION=v2.51.0
MOCKERY_VERSION=v2.53.3
MOCKERY=$(RUNCONTAINER)/src ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION}
MOCKERY_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/mockery/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg MOCKERY_VERSION=${MOCKERY_VERSION} -t ${MOCKERY_IMAGENAME}:$(MOCKERY_VERSION) .

@@ -338,7 +349,7 @@ versions_prepare:
check_environment:
	@$(MAKEPATH)/$(CHECKENVCMD)

compile_core: gen_apis
compile_core: lint_apis gen_apis
	@echo "compiling binary for core (golang image)..."
	@echo $(GOBUILDPATHINCONTAINER)
	@$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATHINCONTAINER) -w $(GOBUILDPATH_CORE) $(GOBUILDIMAGE) $(GOIMAGEBUILD_CORE) -o $(GOBUILDPATHINCONTAINER)/$(GOBUILDMAKEPATH_CORE)/$(CORE_BINARYNAME)

@@ -384,22 +395,19 @@ build:
	echo Do not push base images since no base images built. ; \
	exit 1; \
	fi
	# PULL_BASE_FROM_DOCKERHUB should be true if BUILD_BASE is not true
	@if [ "$(BUILD_BASE)" != "true" ] && [ "$(PULL_BASE_FROM_DOCKERHUB)" = "false" ] ; then \
	echo Should pull base images from registry in docker configuration since no base images built. ; \
	exit 1; \
	fi
	make -f $(MAKEFILEPATH_PHOTON)/Makefile $(BUILDTARGET) -e DEVFLAG=$(DEVFLAG) -e GOBUILDIMAGE=$(GOBUILDIMAGE) -e NODEBUILDIMAGE=$(NODEBUILDIMAGE) \
	-e REGISTRYVERSION=$(REGISTRYVERSION) -e REGISTRY_SRC_TAG=$(REGISTRY_SRC_TAG) -e DISTRIBUTION_SRC=$(DISTRIBUTION_SRC)\
	-e TRIVYVERSION=$(TRIVYVERSION) -e TRIVYADAPTERVERSION=$(TRIVYADAPTERVERSION) \
	-e VERSIONTAG=$(VERSIONTAG) \
	-e DOCKERNETWORK=$(DOCKERNETWORK) \
	-e BUILDREG=$(BUILDREG) -e BUILDTRIVYADP=$(BUILDTRIVYADP) \
	-e BUILD_INSTALLER=$(BUILD_INSTALLER) \
	-e NPM_REGISTRY=$(NPM_REGISTRY) -e BASEIMAGETAG=$(BASEIMAGETAG) -e IMAGENAMESPACE=$(IMAGENAMESPACE) -e BASEIMAGENAMESPACE=$(BASEIMAGENAMESPACE) \
	-e REGISTRYURL=$(REGISTRYURL) \
	-e TRIVY_DOWNLOAD_URL=$(TRIVY_DOWNLOAD_URL) -e TRIVY_ADAPTER_DOWNLOAD_URL=$(TRIVY_ADAPTER_DOWNLOAD_URL) \
	-e PULL_BASE_FROM_DOCKERHUB=$(PULL_BASE_FROM_DOCKERHUB) -e BUILD_BASE=$(BUILD_BASE) \
	-e REGISTRYUSER=$(REGISTRYUSER) -e REGISTRYPASSWORD=$(REGISTRYPASSWORD) \
	-e PUSHBASEIMAGE=$(PUSHBASEIMAGE)
	-e PUSHBASEIMAGE=$(PUSHBASEIMAGE) -e GOBUILDIMAGE=$(GOBUILDIMAGE)

build_standalone_db_migrator: compile_standalone_db_migrator
	make -f $(MAKEFILEPATH_PHOTON)/Makefile _build_standalone_db_migrator -e BASEIMAGETAG=$(BASEIMAGETAG) -e VERSIONTAG=$(VERSIONTAG)
@@ -440,7 +448,14 @@ package_online: update_prepare_version
	@rm -rf $(HARBORPKG)
	@echo "Done."

package_offline: update_prepare_version compile build
.PHONY: check_buildinstaller
check_buildinstaller:
	@if [ "$(BUILD_INSTALLER)" != "true" ]; then \
	echo "Must set BUILD_INSTALLER as true while triggering package_offline build" ; \
	exit 1; \
	fi

package_offline: check_buildinstaller update_prepare_version compile build

	@echo "packing offline package ..."
	@cp -r make $(HARBORPKG)
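How the new guard behaves (a sketch): running the offline packaging target with the installer disabled now fails fast with the message from the recipe above.

```sh
make package_offline BUILD_INSTALLER=false
# Must set BUILD_INSTALLER as true while triggering package_offline build
```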
@@ -471,7 +486,7 @@ misspell:
	@find . -type d \( -path ./tests \) -prune -o -name '*.go' -print | xargs misspell -error

# golangci-lint binary installation or refer to https://golangci-lint.run/usage/install/#local-installation
# curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2
# curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.2
GOLANGCI_LINT := $(shell go env GOPATH)/bin/golangci-lint
lint:
	@echo checking lint

@@ -539,7 +554,7 @@ swagger_client:
	rm -rf harborclient
	mkdir -p harborclient/harbor_v2_swagger_client
	java -jar openapi-generator-cli.jar generate -i api/v2.0/swagger.yaml -g python -o harborclient/harbor_v2_swagger_client --package-name v2_swagger_client
	cd harborclient/harbor_v2_swagger_client; python ./setup.py install
	cd harborclient/harbor_v2_swagger_client; pip install .
	pip install docker -q
	pip freeze
@@ -1,14 +1,13 @@
# Harbor

[](https://github.com/goharbor/harbor/actions?query=event%3Apush+branch%3Amain+workflow%3ACI+)
[](https://codecov.io/gh/goharbor/harbor)
[](https://github.com/goharbor/harbor/actions/workflows/CI.yml)
[](https://goreportcard.com/report/github.com/goharbor/harbor)
[](https://codecov.io/gh/goharbor/harbor)
[](https://bestpractices.coreinfrastructure.org/projects/2095)
[](https://www.codacy.com/gh/goharbor/harbor/dashboard?utm_source=github.com&utm_medium=referral&utm_content=goharbor/harbor&utm_campaign=Badge_Grade)
[](https://www.googleapis.com/storage/v1/b/harbor-nightly/o)
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor?ref=badge_shield)
[](https://artifacthub.io/packages/helm/harbor/harbor)
</br>

|Community Meeting|
RELEASES.md: 17 changes
@@ -1,28 +1,27 @@
# Versioning and Release
This document describes the versioning and release process of Harbor. This document is a living document, contents will be updated according to each release.
This document describes the versioning and release process of Harbor. This document is a living document, it's contents will be updated according to each release.

## Releases
Harbor releases will be versioned using dotted triples, similar to [Semantic Version](http://semver.org/). For this specific document, we will refer to the respective components of this triple as `<major>.<minor>.<patch>`. The version number may have additional information, such as "-rc1,-rc2,-rc3" to mark release candidate builds for earlier access. Such releases will be considered as "pre-releases".

### Major and Minor Releases
Major and minor releases of Harbor will be branched from `main` when the release reaches to `RC(release candidate)` state. The branch format should follow `release-<major>.<minor>.0`. For example, once the release `v1.0.0` reaches to RC, a branch will be created with the format `release-1.0.0`. When the release reaches to `GA(General Available)` state, The tag with format `v<major>.<minor>.<patch>` and should be made with command `git tag -s v<major>.<minor>.<patch>`. The release cadence is around 3 months, might be adjusted based on open source event, but will communicate it clearly.
Major and minor releases of Harbor will be branched from `main` when the release reaches to `RC(release candidate)` state. The branch format should follow `release-<major>.<minor>.0`. For example, once the release `v1.0.0` reaches to RC, a branch will be created with the format `release-1.0.0`. When the release reaches to `GA(General Available)` state, the tag with format `v<major>.<minor>.<patch>` and should be made with the command `git tag -s v<major>.<minor>.<patch>`. The release cadence is around 3 months, might be adjusted based on open source events, but will communicate it clearly.

### Patch releases
Patch releases are based on the major/minor release branch, the release cadence for patch release of recent minor release is one month to solve critical community and security issues. The cadence for patch release of recent minus two minor releases are on-demand driven based on the severity of the issue to be fixed.

### Pre-releases
`Pre-releases:mainly the different RC builds` will be compiled from their corresponding branches. Please note they are done to assist in the stabilization process, no guarantees are provided.
`Pre-releases:mainly the different RC builds` will be compiled from their corresponding branches. Please note that they are done to assist in the stabilization process, no guarantees are provided.

### Minor Release Support Matrix
| Version | Supported |
|----------------| ------------------ |
| Harbor v2.13.x | :white_check_mark: |
| Harbor v2.12.x | :white_check_mark: |
| Harbor v2.11.x | :white_check_mark: |
| Harbor v2.10.x | :white_check_mark: |

### Upgrade path and support policy
The upgrade path for Harbor is (1) 2.2.x patch releases are always compatible with its major and minor version. For example, previous released 2.2.x can be upgraded to most recent 2.2.3 release. (2) Harbor only supports two previous minor releases to upgrade to current minor release. For example, 2.3.0 will only support 2.1.0 and 2.2.0 to upgrade from, 2.0.0 to 2.3.0 is not supported. One should upgrade to 2.2.0 first, then to 2.3.0.
The upgrade path for Harbor is (1) 2.2.x patch releases are always compatible with its major and minor versions. For example, previous released 2.2.x can be upgraded to most recent 2.2.3 release. (2) Harbor only supports two previous minor releases to upgrade to current minor release. For example, 2.3.0 will only support 2.1.0 and 2.2.0 to upgrade from, 2.0.0 to 2.3.0 is not supported. One should upgrade to 2.2.0 first, then to 2.3.0.
The Harbor project maintains release branches for the three most recent minor releases, each minor release will be maintained for approximately 9 months.

### Next Release

@@ -33,12 +32,12 @@ The activity for next release will be tracked in the [up-to-date project board](
The following steps outline what to do when it's time to plan for and publish a release. Depending on the release (major/minor/patch), not all the following items are needed.

1. Prepare information about what's new in the release.
   * For every release, update documentation for changes that have happened in the release. See the [goharbor/website](https://github.com/goharbor/website) repo for more details on how to create documentation for a release. All documentation for a release should be published by the time the release is out.
   * For every release, update the documentation for changes that have happened in the release. See the [goharbor/website](https://github.com/goharbor/website) repo for more details on how to create documentation for a release. All documentation for a release should be published by the time the release is out.
   * For every release, write release notes. See [previous releases](https://github.com/goharbor/harbor/releases) for examples of what to include in release notes.
   * For a major/minor release, write a blog post that highlights new features in the release. Plan to publish this the same day as the release. Highlight the themes, or areas of focus, for the release. Some examples of themes are security, bug fixes, feature improvements. If there are any new features or workflows introduced in a release, consider writing additional blog posts to help users learn about the new features. Plan to publish these after the release date (all blogs don’t have to be published all at once).
   * For a major/minor release, write a blog post that highlights new features in the release. Plan to publish this on the same day as the release. Highlight the themes, or areas of focus, for the release. Some examples of themes are security, bug fixes, feature improvements. If there are any new features or workflows introduced in a release, consider writing additional blog posts to help users learn about the new features. Plan to publish these after the release date (all blogs don’t have to be published all at once).
1. Release a new version. Make the new version, docs updates, and blog posts available.
1. Announce the release and thank contributors. We should be doing the following for all releases.
   * In all messages to the community include a brief list of highlights and links to the new release blog, release notes, or download location. Also include shoutouts to community member contribution included in the release.
   * In all messages to the community include a brief list of highlights and links to the new release blog, release notes, or download location. Also include shoutouts to community members contributions included in the release.
   * Send an email to the community via the [mailing list](https://lists.cncf.io/g/harbor-users)
   * Post a message in the Harbor [slack channel](https://cloud-native.slack.com/archives/CC1E09J6S)
   * Post to social media. Maintainers are encouraged to also post or repost from the Harbor account to help spread the word.
@@ -9,11 +9,11 @@ This document provides a link to the [Harbor Project board](https://github.com/o
Discussion on the roadmap can take place in threads under [Issues](https://github.com/goharbor/harbor/issues) or in [community meetings](https://goharbor.io/community/). Please open and comment on an issue if you want to provide suggestions and feedback to an item in the roadmap. Please review the roadmap to avoid potential duplicated effort.

### How to add an item to the roadmap?
Please open an issue to track any initiative on the roadmap of Harbor (Usually driven by new feature requests). We will work with and rely on our community to focus our efforts to improve Harbor.
Please open an issue to track any initiative on the roadmap of Harbor (Usually driven by new feature requests). We will work with and rely on our community to focus our efforts on improving Harbor.

### Current Roadmap

The following table includes the current roadmap for Harbor. If you have any questions or would like to contribute to Harbor, please attend a [community meeting](https://goharbor.io/community/) to discuss with our team. If you don't know where to start, we are always looking for contributors that will help us reduce technical, automation, and documentation debt. Please take the timelines & dates as proposals and goals. Priorities and requirements change based on community feedback, roadblocks encountered, community contributions, etc. If you depend on a specific item, we encourage you to attend community meetings to get updated status information, or help us deliver that feature by contributing to Harbor.
The following table includes the current roadmap for Harbor. If you have any questions or would like to contribute to Harbor, please attend a [community meeting](https://goharbor.io/community/) to discuss with our team. If you don't know where to start, we are always looking for contributors who will help us reduce technical, automation, and documentation debt. Please take the timelines & dates as proposals and goals. Priorities and requirements change based on community feedback, roadblocks encountered, community contributions, etc. If you depend on a specific item, we encourage you to attend community meetings to get updated status information, or help us deliver that feature by contributing to Harbor.

`Last Updated: June 2022`

@@ -49,4 +49,4 @@ The following table includes the current roadmap for Harbor. If you have any que
|I&AM and RBAC|Improved Multi-tenancy through granular access and ability to manage teams of users and robot accounts through workspaces|Dec 2020|
|Observability|Expose Harbor metrics through Prometheus Integration|Mar 2021|
|Tracing|Leverage OpenTelemetry for enhanced tracing capabilities and identify bottlenecks and improve performance |Mar 2021|
|Image Signing|Leverage Sigstore Cosign to deliver persisting image signatures across image replications|Apr 2021|
|Image Signing|Leverage Sigstore Cosign to deliver persistent image signatures across image replications|Apr 2021|
@@ -336,6 +336,8 @@ paths:
      responses:
        '200':
          $ref: '#/responses/200'
        '400':
          $ref: '#/responses/400'
        '404':
          $ref: '#/responses/404'
        '500':

@@ -3029,6 +3031,8 @@ paths:
            type: string
        '401':
          $ref: '#/responses/401'
        '409':
          $ref: '#/responses/409'
        '500':
          $ref: '#/responses/500'
  '/usergroups/{group_id}':

@@ -3560,6 +3564,8 @@ paths:
      responses:
        '200':
          $ref: '#/responses/200'
        '400':
          $ref: '#/responses/400'
        '401':
          $ref: '#/responses/401'
        '403':

@@ -3998,6 +4004,8 @@ paths:
      responses:
        '200':
          $ref: '#/responses/200'
        '400':
          $ref: '#/responses/400'
        '401':
          $ref: '#/responses/401'
        '403':

@@ -6138,6 +6146,7 @@ paths:
          cve_id(exact match)
          cvss_score_v3(range condition)
          severity(exact match)
          status(exact match)
          repository_name(exact match)
          project_id(exact match)
          package(exact match)

@@ -7312,6 +7321,10 @@ definitions:
        type: string
        description: 'The bandwidth limit of proxy cache, in Kbps (kilobits per second). It limits the communication between Harbor and the upstream registry, not the client and the Harbor.'
        x-nullable: true
      max_upstream_conn:
        type: string
        description: 'The max connection per artifact to the upstream registry in current proxy cache project, if it is -1, no limit to upstream registry connections'
        x-nullable: true
  ProjectSummary:
    type: object
    properties:
@@ -7453,6 +7466,12 @@ definitions:
        type: boolean
        description: Whether to enable copy by chunk.
        x-isnullable: true
      single_active_replication:
        type: boolean
        description: |-
          Whether to skip execution until the previous active execution finishes,
          avoiding the execution of the same replication rules multiple times in parallel.
        x-isnullable: true # make this field optional to keep backward compatibility
  ReplicationTrigger:
    type: object
    properties:
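For illustration, a request that sets the new field on an existing replication policy; the endpoint path, policy id, and the `name` field are assumptions here, only `single_active_replication` itself comes from this diff:

```sh
# hypothetical update; a real call must send the policy's full required body
curl -X PUT "https://harbor.example.com/api/v2.0/replication/policies/1" \
  -H "Content-Type: application/json" \
  -d '{"name": "sync-to-dr", "single_active_replication": true}'
```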
@@ -10066,6 +10085,9 @@ definitions:
      severity:
        type: string
        description: the severity of the vulnerability
      status:
        type: string
        description: the status of the vulnerability, example "fixed", "won't fix"
      cvss_v3_score:
        type: number
        format: float
@@ -176,7 +176,7 @@ log:
#   port: 5140

#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.13.0
_version: 2.14.0

# Uncomment external_database if using external database.
# external_database:
@@ -0,0 +1,11 @@
ALTER TABLE role_permission ALTER COLUMN id TYPE BIGINT;
ALTER SEQUENCE role_permission_id_seq AS BIGINT;

ALTER TABLE permission_policy ALTER COLUMN id TYPE BIGINT;
ALTER SEQUENCE permission_policy_id_seq AS BIGINT;

ALTER TABLE role_permission ALTER COLUMN permission_policy_id TYPE BIGINT;

ALTER TABLE vulnerability_record ADD COLUMN IF NOT EXISTS status text;

ALTER TABLE replication_policy ADD COLUMN IF NOT EXISTS single_active_replication boolean;
@@ -0,0 +1,17 @@
/*
Initialize skip_audit_log_database configuration based on existing audit log usage - Only insert the configuration if it doesn't already exist
1. If tables exist and show evidence of previous usage
   set skip_audit_log_database to false
2. If tables exist but show no evidence of usage, don't create the configuration record
*/
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM properties WHERE k = 'skip_audit_log_database') THEN
        RETURN;
    END IF;

    IF (SELECT last_value FROM audit_log_id_seq) > 1
       OR (SELECT last_value FROM audit_log_ext_id_seq) > 1 THEN
        INSERT INTO properties (k, v) VALUES ('skip_audit_log_database', 'false');
    END IF;
END $$;
@@ -18,7 +18,7 @@ TIMESTAMP=$(shell date +"%Y%m%d")

# docker parameters
DOCKERCMD=$(shell which docker)
DOCKERBUILD=$(DOCKERCMD) build --no-cache
DOCKERBUILD=$(DOCKERCMD) build --no-cache --network=$(DOCKERNETWORK)
DOCKERBUILD_WITH_PULL_PARA=$(DOCKERBUILD) --pull=$(PULL_BASE_FROM_DOCKERHUB)
DOCKERRMIMAGE=$(DOCKERCMD) rmi
DOCKERIMAGES=$(DOCKERCMD) images

@@ -154,7 +154,7 @@ _build_trivy_adapter:
	$(call _extract_archive, $(TRIVY_ADAPTER_DOWNLOAD_URL), $(DOCKERFILEPATH_TRIVY_ADAPTER)/binary/) ; \
	else \
	echo "Building Trivy adapter $(TRIVYADAPTERVERSION) from sources..." ; \
	cd $(DOCKERFILEPATH_TRIVY_ADAPTER) && $(DOCKERFILEPATH_TRIVY_ADAPTER)/builder.sh $(TRIVYADAPTERVERSION) && cd - ; \
	cd $(DOCKERFILEPATH_TRIVY_ADAPTER) && $(DOCKERFILEPATH_TRIVY_ADAPTER)/builder.sh $(TRIVYADAPTERVERSION) $(GOBUILDIMAGE) $(DOCKERNETWORK) && cd - ; \
	fi ; \
	echo "Building Trivy adapter container for photon..." ; \
	$(DOCKERBUILD_WITH_PULL_PARA) --build-arg harbor_base_image_version=$(BASEIMAGETAG) \

@@ -178,7 +178,7 @@ _build_registry:
	rm -rf $(DOCKERFILEPATH_REG)/binary && mkdir -p $(DOCKERFILEPATH_REG)/binary && \
	$(call _get_binary, $(REGISTRYURL), $(DOCKERFILEPATH_REG)/binary/registry); \
	else \
	cd $(DOCKERFILEPATH_REG) && $(DOCKERFILEPATH_REG)/builder $(REGISTRY_SRC_TAG) $(DISTRIBUTION_SRC) && cd - ; \
	cd $(DOCKERFILEPATH_REG) && $(DOCKERFILEPATH_REG)/builder $(REGISTRY_SRC_TAG) $(DISTRIBUTION_SRC) $(GOBUILDIMAGE) $(DOCKERNETWORK) && cd - ; \
	fi
	@echo "building registry container for photon..."
	@chmod 655 $(DOCKERFILEPATH_REG)/binary/registry && $(DOCKERBUILD_WITH_PULL_PARA) --build-arg harbor_base_image_version=$(BASEIMAGETAG) --build-arg harbor_base_namespace=$(BASEIMAGENAMESPACE) -f $(DOCKERFILEPATH_REG)/$(DOCKERFILENAME_REG) -t $(DOCKERIMAGENAME_REG):$(VERSIONTAG) .

@@ -233,10 +233,17 @@ define _build_base
	fi
endef

build: _build_prepare _build_db _build_portal _build_core _build_jobservice _build_log _build_nginx _build_registry _build_registryctl _build_trivy_adapter _build_redis _compile_and_build_exporter
ifeq ($(BUILD_INSTALLER), true)
buildcompt: _build_prepare _build_db _build_portal _build_core _build_jobservice _build_log _build_nginx _build_registry _build_registryctl _build_trivy_adapter _build_redis _compile_and_build_exporter
else
buildcompt: _build_db _build_portal _build_core _build_jobservice _build_nginx _build_registry _build_registryctl _build_trivy_adapter _build_redis _compile_and_build_exporter
endif

build: buildcompt
	@if [ -n "$(REGISTRYUSER)" ] && [ -n "$(REGISTRYPASSWORD)" ] ; then \
	docker logout ; \
	fi

cleanimage:
	@echo "cleaning image for photon..."
	- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG)
@@ -1,7 +1,7 @@
FROM photon:5.0

RUN tdnf install -y python3 python3-pip python3-PyYAML python3-jinja2 && tdnf clean all
RUN pip3 install pipenv==2022.1.8
RUN pip3 install pipenv==2025.0.3

#To install only htpasswd binary from photon package httpd
RUN tdnf install -y rpm cpio apr-util
@@ -12,4 +12,4 @@ pylint = "*"
 pytest = "*"

 [requires]
-python_version = "3.9.1"
+python_version = "3.13"

@@ -1,11 +1,11 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "0c84f574a48755d88f78a64d754b3f834a72f2a86808370dd5f3bf3e650bfa13"
+            "sha256": "d3a89b8575c29b9f822b892ffd31fd4a997effb1ebf3e3ed061a41e2d04b4490"
         },
         "pipfile-spec": 6,
         "requires": {
-            "python_version": "3.9.1"
+            "python_version": "3.13"
         },
         "sources": [
             {

@@ -18,157 +18,122 @@
     "default": {
         "click": {
             "hashes": [
-                "sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a",
-                "sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"
+                "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202",
+                "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"
             ],
             "index": "pypi",
-            "version": "==8.0.1"
+            "markers": "python_version >= '3.10'",
+            "version": "==8.2.1"
         },
         "packaging": {
             "hashes": [
-                "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
-                "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
+                "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484",
+                "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"
             ],
             "index": "pypi",
-            "version": "==20.9"
-        },
-        "pyparsing": {
-            "hashes": [
-                "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
-                "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
-            ],
-            "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
-            "version": "==2.4.7"
+            "markers": "python_version >= '3.8'",
+            "version": "==25.0"
         }
     },
     "develop": {
         "astroid": {
             "hashes": [
-                "sha256:4db03ab5fc3340cf619dbc25e42c2cc3755154ce6009469766d7143d1fc2ee4e",
-                "sha256:8a398dfce302c13f14bab13e2b14fe385d32b73f4e4853b9bdfb64598baa1975"
+                "sha256:104fb9cb9b27ea95e847a94c003be03a9e039334a8ebca5ee27dafaf5c5711eb",
+                "sha256:c332157953060c6deb9caa57303ae0d20b0fbdb2e59b4a4f2a6ba49d0a7961ce"
             ],
-            "markers": "python_version ~= '3.6'",
-            "version": "==2.5.6"
+            "markers": "python_full_version >= '3.9.0'",
+            "version": "==3.3.10"
        },
-        "attrs": {
+        "dill": {
             "hashes": [
-                "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1",
-                "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"
+                "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0",
+                "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049"
             ],
-            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
-            "version": "==21.2.0"
+            "markers": "python_version >= '3.8'",
+            "version": "==0.4.0"
        },
        "iniconfig": {
             "hashes": [
-                "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3",
-                "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"
+                "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7",
+                "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"
             ],
-            "version": "==1.1.1"
+            "markers": "python_version >= '3.8'",
+            "version": "==2.1.0"
        },
        "isort": {
             "hashes": [
-                "sha256:0a943902919f65c5684ac4e0154b1ad4fac6dcaa5d9f3426b732f1c8b5419be6",
-                "sha256:2bb1680aad211e3c9944dbce1d4ba09a989f04e238296c87fe2139faa26d655d"
+                "sha256:1cb5df28dfbc742e490c5e41bad6da41b805b0a8be7bc93cd0fb2a8a890ac450",
+                "sha256:2dc5d7f65c9678d94c88dfc29161a320eec67328bc97aad576874cb4be1e9615"
             ],
-            "markers": "python_version >= '3.6' and python_version < '4.0'",
-            "version": "==5.8.0"
-        },
-        "lazy-object-proxy": {
-            "hashes": [
-                "sha256:17e0967ba374fc24141738c69736da90e94419338fd4c7c7bef01ee26b339653",
-                "sha256:1fee665d2638491f4d6e55bd483e15ef21f6c8c2095f235fef72601021e64f61",
-                "sha256:22ddd618cefe54305df49e4c069fa65715be4ad0e78e8d252a33debf00f6ede2",
-                "sha256:24a5045889cc2729033b3e604d496c2b6f588c754f7a62027ad4437a7ecc4837",
-                "sha256:410283732af311b51b837894fa2f24f2c0039aa7f220135192b38fcc42bd43d3",
-                "sha256:4732c765372bd78a2d6b2150a6e99d00a78ec963375f236979c0626b97ed8e43",
-                "sha256:489000d368377571c6f982fba6497f2aa13c6d1facc40660963da62f5c379726",
-                "sha256:4f60460e9f1eb632584c9685bccea152f4ac2130e299784dbaf9fae9f49891b3",
-                "sha256:5743a5ab42ae40caa8421b320ebf3a998f89c85cdc8376d6b2e00bd12bd1b587",
-                "sha256:85fb7608121fd5621cc4377a8961d0b32ccf84a7285b4f1d21988b2eae2868e8",
-                "sha256:9698110e36e2df951c7c36b6729e96429c9c32b3331989ef19976592c5f3c77a",
-                "sha256:9d397bf41caad3f489e10774667310d73cb9c4258e9aed94b9ec734b34b495fd",
-                "sha256:b579f8acbf2bdd9ea200b1d5dea36abd93cabf56cf626ab9c744a432e15c815f",
-                "sha256:b865b01a2e7f96db0c5d12cfea590f98d8c5ba64ad222300d93ce6ff9138bcad",
-                "sha256:bf34e368e8dd976423396555078def5cfc3039ebc6fc06d1ae2c5a65eebbcde4",
-                "sha256:c6938967f8528b3668622a9ed3b31d145fab161a32f5891ea7b84f6b790be05b",
-                "sha256:d1c2676e3d840852a2de7c7d5d76407c772927addff8d742b9808fe0afccebdf",
-                "sha256:d7124f52f3bd259f510651450e18e0fd081ed82f3c08541dffc7b94b883aa981",
-                "sha256:d900d949b707778696fdf01036f58c9876a0d8bfe116e8d220cfd4b15f14e741",
-                "sha256:ebfd274dcd5133e0afae738e6d9da4323c3eb021b3e13052d8cbd0e457b1256e",
-                "sha256:ed361bb83436f117f9917d282a456f9e5009ea12fd6de8742d1a4752c3017e93",
-                "sha256:f5144c75445ae3ca2057faac03fda5a902eff196702b0a24daf1d6ce0650514b"
-            ],
-            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
-            "version": "==1.6.0"
+            "markers": "python_full_version >= '3.9.0'",
+            "version": "==6.0.1"
        },
        "mccabe": {
             "hashes": [
-                "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
-                "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
+                "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325",
+                "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"
             ],
-            "version": "==0.6.1"
+            "markers": "python_version >= '3.6'",
+            "version": "==0.7.0"
        },
        "packaging": {
             "hashes": [
-                "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
-                "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
+                "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484",
+                "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"
             ],
             "index": "pypi",
-            "version": "==20.9"
+            "markers": "python_version >= '3.8'",
+            "version": "==25.0"
        },
+        "platformdirs": {
+            "hashes": [
+                "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc",
+                "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"
+            ],
+            "markers": "python_version >= '3.9'",
+            "version": "==4.3.8"
+        },
        "pluggy": {
             "hashes": [
-                "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
-                "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
+                "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3",
+                "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"
             ],
-            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
-            "version": "==0.13.1"
+            "markers": "python_version >= '3.9'",
+            "version": "==1.6.0"
        },
-        "py": {
+        "pygments": {
             "hashes": [
-                "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3",
-                "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"
+                "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887",
+                "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"
             ],
-            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
-            "version": "==1.10.0"
+            "markers": "python_version >= '3.8'",
+            "version": "==2.19.2"
        },
        "pylint": {
             "hashes": [
-                "sha256:586d8fa9b1891f4b725f587ef267abe2a1bad89d6b184520c7f07a253dd6e217",
-                "sha256:f7e2072654a6b6afdf5e2fb38147d3e2d2d43c89f648637baab63e026481279b"
+                "sha256:2b11de8bde49f9c5059452e0c310c079c746a0a8eeaa789e5aa966ecc23e4559",
+                "sha256:43860aafefce92fca4cf6b61fe199cdc5ae54ea28f9bf4cd49de267b5195803d"
             ],
             "index": "pypi",
-            "version": "==2.8.2"
-        },
-        "pyparsing": {
-            "hashes": [
-                "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
-                "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
-            ],
-            "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
-            "version": "==2.4.7"
+            "markers": "python_full_version >= '3.9.0'",
+            "version": "==3.3.7"
        },
        "pytest": {
             "hashes": [
-                "sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b",
-                "sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890"
+                "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7",
+                "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"
             ],
             "index": "pypi",
-            "version": "==6.2.4"
+            "markers": "python_version >= '3.9'",
+            "version": "==8.4.1"
        },
-        "toml": {
+        "tomlkit": {
             "hashes": [
-                "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
-                "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
+                "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1",
+                "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0"
             ],
-            "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
-            "version": "==0.10.2"
-        },
-        "wrapt": {
-            "hashes": [
-                "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"
-            ],
-            "version": "==1.12.1"
+            "markers": "python_version >= '3.8'",
+            "version": "==0.13.3"
        }
    }
 }

@@ -10,7 +10,7 @@ from migrations import accept_versions
 @click.command()
 @click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
 @click.option('-o', '--output', default='', help="the path of output config file")
-@click.option('-t', '--target', default='2.13.0', help="target version of input path")
+@click.option('-t', '--target', default='2.14.0', help="target version of input path")
 def migrate(input_, output, target):
     """
     migrate command will migrate config file style to specific version

@@ -2,4 +2,4 @@ import os

 MIGRATION_BASE_DIR = os.path.dirname(__file__)

-accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0', '2.10.0', '2.11.0', '2.12.0', '2.13.0'}
+accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0', '2.10.0', '2.11.0', '2.12.0', '2.13.0', '2.14.0'}

@@ -0,0 +1,21 @@
import os
from jinja2 import Environment, FileSystemLoader, StrictUndefined, select_autoescape
from utils.migration import read_conf

revision = '2.14.0'
down_revisions = ['2.13.0']

def migrate(input_cfg, output_cfg):
    current_dir = os.path.dirname(__file__)
    tpl = Environment(
        loader=FileSystemLoader(current_dir),
        undefined=StrictUndefined,
        trim_blocks=True,
        lstrip_blocks=True,
        autoescape=select_autoescape()
    ).get_template('harbor.yml.jinja')

    config_dict = read_conf(input_cfg)

    with open(output_cfg, 'w') as f:
        f.write(tpl.render(**config_dict))

@@ -0,0 +1,775 @@
# Configuration file of Harbor

# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: {{ hostname }}

# http related config
{% if http is defined %}
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: {{ http.port }}
{% else %}
# http:
#   # port for http, default is 80. If https enabled, this port will redirect to https port
#   port: 80
{% endif %}

{% if https is defined %}
# https related config
https:
  # https port for harbor, default is 443
  port: {{ https.port }}
  # The path of cert and key files for nginx
  certificate: {{ https.certificate }}
  private_key: {{ https.private_key }}
  # enable strong ssl ciphers (default: false)
{% if strong_ssl_ciphers is defined %}
  strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
{% else %}
  strong_ssl_ciphers: false
{% endif %}
{% else %}
# https related config
# https:
#   # https port for harbor, default is 443
#   port: 443
#   # The path of cert and key files for nginx
#   certificate: /your/certificate/path
#   private_key: /your/private/key/path
#   # enable strong ssl ciphers (default: false)
#   strong_ssl_ciphers: false
{% endif %}

# # Harbor will set ipv4 enabled only by default if this block is not configured
# # Otherwise, please uncomment this block to configure your own ip_family stacks
{% if ip_family is defined %}
ip_family:
  # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affects the nginx related component
{% if ip_family.ipv6 is defined %}
  ipv6:
    enabled: {{ ip_family.ipv6.enabled | lower }}
{% else %}
  ipv6:
    enabled: false
{% endif %}
  # ipv4Enabled set to true by default, currently it affects the nginx related component
{% if ip_family.ipv4 is defined %}
  ipv4:
    enabled: {{ ip_family.ipv4.enabled | lower }}
{% else %}
  ipv4:
    enabled: true
{% endif %}
{% else %}
# ip_family:
#   # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affects the nginx related component
#   ipv6:
#     enabled: false
#   # ipv4Enabled set to true by default, currently it affects the nginx related component
#   ipv4:
#     enabled: true
{% endif %}

{% if internal_tls is defined %}
# Uncommenting the following enables tls communication between all harbor components
internal_tls:
  # set enabled to true to enable internal tls
  enabled: {{ internal_tls.enabled | lower }}
{% if internal_tls.dir is defined %}
  # put your cert and key files on dir
  dir: {{ internal_tls.dir }}
{% endif %}
{% else %}
# internal_tls:
#   # set enabled to true to enable internal tls
#   enabled: true
#   # put your cert and key files on dir
#   dir: /etc/harbor/tls/internal
{% endif %}

# Uncomment external_url if you want to enable external proxy
# When it is enabled, the hostname will no longer be used
{% if external_url is defined %}
external_url: {{ external_url }}
{% else %}
# external_url: https://reg.mydomain.com:8433
{% endif %}

# The initial password of Harbor admin
# It only takes effect the first time Harbor is installed
# Remember to change the admin password from the UI after launching Harbor.
{% if harbor_admin_password is defined %}
harbor_admin_password: {{ harbor_admin_password }}
{% else %}
harbor_admin_password: Harbor12345
{% endif %}

# Harbor DB configuration
database:
{% if database is defined %}
  # The password for the root user of Harbor DB. Change this before any production use.
  password: {{ database.password }}
  # The maximum number of connections in the idle connection pool. If it <= 0, no idle connections are retained.
  max_idle_conns: {{ database.max_idle_conns }}
  # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for postgres of harbor.
  max_open_conns: {{ database.max_open_conns }}
  # The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
  # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
{% if database.conn_max_lifetime is defined %}
  conn_max_lifetime: {{ database.conn_max_lifetime }}
{% else %}
  conn_max_lifetime: 5m
{% endif %}
  # The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
  # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
{% if database.conn_max_idle_time is defined %}
  conn_max_idle_time: {{ database.conn_max_idle_time }}
{% else %}
  conn_max_idle_time: 0
{% endif %}
{% else %}
  # The password for the root user of Harbor DB. Change this before any production use.
  password: root123
  # The maximum number of connections in the idle connection pool. If it <= 0, no idle connections are retained.
  max_idle_conns: 100
  # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for postgres of harbor.
  max_open_conns: 900
  # The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
  # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  conn_max_lifetime: 5m
  # The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
  # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  conn_max_idle_time: 0
{% endif %}

{% if data_volume is defined %}
# The default data volume
data_volume: {{ data_volume }}
{% else %}
# The default data volume
data_volume: /data
{% endif %}

# Harbor Storage settings by default use the /data dir on the local filesystem
# Uncomment the storage_service setting if you want to use external storage
{% if storage_service is defined %}
storage_service:
{% for key, value in storage_service.items() %}
{% if key == 'ca_bundle' %}
#   # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
#   # of registry's and chart repository's containers. This is usually needed when the user hosts an internal storage with a self-signed certificate.
  ca_bundle: {{ value if value is not none else '' }}
{% elif key == 'redirect' %}
#   # set disable to true when you want to disable registry redirect
  redirect:
{% if storage_service.redirect.disabled is defined %}
    disable: {{ storage_service.redirect.disabled | lower }}
{% else %}
    disable: {{ storage_service.redirect.disable | lower }}
{% endif %}
{% else %}
#   # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
#   # for more info about this configuration please refer to https://distribution.github.io/distribution/about/configuration/
#   # and https://distribution.github.io/distribution/storage-drivers/
  {{ key }}:
{% for k, v in value.items() %}
    {{ k }}: {{ v if v is not none else '' }}
{% endfor %}
{% endif %}
{% endfor %}
{% else %}
# storage_service:
#   # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
#   # of registry's and chart repository's containers. This is usually needed when the user hosts an internal storage with a self-signed certificate.
#   ca_bundle:

#   # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
#   # for more info about this configuration please refer to https://distribution.github.io/distribution/about/configuration/
#   # and https://distribution.github.io/distribution/storage-drivers/
#   filesystem:
#     maxthreads: 100
#   # set disable to true when you want to disable registry redirect
#   redirect:
#     disable: false
{% endif %}

# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
{% if trivy is defined %}
trivy:
  # ignoreUnfixed The flag to display only fixed vulnerabilities
{% if trivy.ignore_unfixed is defined %}
  ignore_unfixed: {{ trivy.ignore_unfixed | lower }}
{% else %}
  ignore_unfixed: false
{% endif %}
  # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
  #
  # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
  # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
  # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
{% if trivy.skip_update is defined %}
  skip_update: {{ trivy.skip_update | lower }}
{% else %}
  skip_update: false
{% endif %}
{% if trivy.skip_java_db_update is defined %}
  # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
  # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
  skip_java_db_update: {{ trivy.skip_java_db_update | lower }}
{% else %}
  skip_java_db_update: false
{% endif %}
  #
{% if trivy.offline_scan is defined %}
  offline_scan: {{ trivy.offline_scan | lower }}
{% else %}
  offline_scan: false
{% endif %}
  #
  # Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
{% if trivy.security_check is defined %}
  security_check: {{ trivy.security_check }}
{% else %}
  security_check: vuln
{% endif %}
  #
  # insecure The flag to skip verifying registry certificate
{% if trivy.insecure is defined %}
  insecure: {{ trivy.insecure | lower }}
{% else %}
  insecure: false
{% endif %}
  #
{% if trivy.timeout is defined %}
  # timeout The duration to wait for scan completion.
  # There is an upper bound of 30 minutes defined in the scan job, so if this `timeout` is larger than 30m0s, it will also time out at 30m0s.
  timeout: {{ trivy.timeout }}
{% else %}
  timeout: 5m0s
{% endif %}
  #
  # github_token The GitHub access token to download Trivy DB
  #
  # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
  # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
  # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
  # https://developer.github.com/v3/#rate-limiting
  #
  # You can create a GitHub token by following the instructions in
  # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
  #
{% if trivy.github_token is defined %}
  github_token: {{ trivy.github_token }}
{% else %}
  # github_token: xxx
{% endif %}
{% else %}
# trivy:
#   # ignoreUnfixed The flag to display only fixed vulnerabilities
#   ignore_unfixed: false
#   # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
#   #
#   # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
#   # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
#   # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
#   skip_update: false
#   #
#   # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
#   # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
#   skip_java_db_update: false
#   #
#   # The offline_scan option prevents Trivy from sending API requests to identify dependencies.
#   # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
#   # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
#   # exist in the local repositories. It means the number of detected vulnerabilities might be smaller in offline mode.
#   # It would work if all the dependencies are in local.
#   # This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
#   offline_scan: false
#   #
#   # insecure The flag to skip verifying registry certificate
#   insecure: false
#   # github_token The GitHub access token to download Trivy DB
#   #
#   # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
#   # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
#   # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
#   # https://developer.github.com/v3/#rate-limiting
#   #
#   # timeout The duration to wait for scan completion.
#   # There is an upper bound of 30 minutes defined in the scan job, so if this `timeout` is larger than 30m0s, it will also time out at 30m0s.
#   timeout: 5m0s
#   #
#   # You can create a GitHub token by following the instructions in
#   # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
#   #
#   # github_token: xxx
{% endif %}

jobservice:
  # Maximum number of job workers in job service
{% if jobservice is defined %}
  max_job_workers: {{ jobservice.max_job_workers }}
  # Maximum hours of task duration in job service, default 24
{% if jobservice.max_job_duration_hours is defined %}
  max_job_duration_hours: {{ jobservice.max_job_duration_hours }}
{% else %}
  max_job_duration_hours: 24
{% endif %}
  # The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
{% if jobservice.job_loggers is defined %}
  job_loggers:
{% for job_logger in jobservice.job_loggers %}
    - {{ job_logger }}
{% endfor %}
{% else %}
  job_loggers:
    - STD_OUTPUT
    - FILE
    # - DB
{% endif %}
  # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
{% if jobservice.logger_sweeper_duration is defined %}
  logger_sweeper_duration: {{ jobservice.logger_sweeper_duration }}
{% else %}
  logger_sweeper_duration: 1
{% endif %}
{% else %}
  max_job_workers: 10
  max_job_duration_hours: 24
  # The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
  job_loggers:
    - STD_OUTPUT
    - FILE
    # - DB
  # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
  logger_sweeper_duration: 1
{% endif %}

notification:
  # Maximum retry count for webhook job
{% if notification is defined %}
  webhook_job_max_retry: {{ notification.webhook_job_max_retry }}
  # HTTP client timeout for webhook job
{% if notification.webhook_job_http_client_timeout is defined %}
  webhook_job_http_client_timeout: {{ notification.webhook_job_http_client_timeout }}
{% else %}
  webhook_job_http_client_timeout: 3 #seconds
{% endif %}
{% else %}
  webhook_job_max_retry: 3
  # HTTP client timeout for webhook job
  webhook_job_http_client_timeout: 3 #seconds
{% endif %}

# Log configurations
log:
  # options are debug, info, warning, error, fatal
{% if log is defined %}
  level: {{ log.level }}
  # configs for logs in local storage
  local:
    # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
    rotate_count: {{ log.local.rotate_count }}
    # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
    # If M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
    # are all valid.
    rotate_size: {{ log.local.rotate_size }}
    # The directory on your host that stores the logs
    location: {{ log.local.location }}
{% if log.external_endpoint is defined %}
  external_endpoint:
    # protocol used to transmit logs to the external endpoint, options are tcp or udp
    protocol: {{ log.external_endpoint.protocol }}
    # The host of the external endpoint
    host: {{ log.external_endpoint.host }}
    # Port of the external endpoint
    port: {{ log.external_endpoint.port }}
{% else %}
  # Uncomment following lines to enable external syslog endpoint.
  # external_endpoint:
  #   # protocol used to transmit logs to the external endpoint, options are tcp or udp
  #   protocol: tcp
  #   # The host of the external endpoint
  #   host: localhost
  #   # Port of the external endpoint
  #   port: 5140
{% endif %}
{% else %}
  level: info
  # configs for logs in local storage
  local:
    # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
    rotate_count: 50
    # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
    # If M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
    # are all valid.
    rotate_size: 200M
    # The directory on your host that stores the logs
    location: /var/log/harbor

  # Uncomment following lines to enable external syslog endpoint.
  # external_endpoint:
  #   # protocol used to transmit logs to the external endpoint, options are tcp or udp
  #   protocol: tcp
  #   # The host of the external endpoint
  #   host: localhost
  #   # Port of the external endpoint
  #   port: 5140
{% endif %}

# This attribute is for the migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.14.0
{% if external_database is defined %}
# Uncomment external_database if using an external database.
external_database:
  harbor:
    host: {{ external_database.harbor.host }}
    port: {{ external_database.harbor.port }}
    db_name: {{ external_database.harbor.db_name }}
    username: {{ external_database.harbor.username }}
    password: {{ external_database.harbor.password }}
    ssl_mode: {{ external_database.harbor.ssl_mode }}
    max_idle_conns: {{ external_database.harbor.max_idle_conns }}
    max_open_conns: {{ external_database.harbor.max_open_conns }}
{% else %}
# Uncomment external_database if using an external database.
# external_database:
#   harbor:
#     host: harbor_db_host
#     port: harbor_db_port
#     db_name: harbor_db_name
#     username: harbor_db_username
#     password: harbor_db_password
#     ssl_mode: disable
#     max_idle_conns: 2
#     max_open_conns: 0
{% endif %}

{% if redis is defined %}
redis:
  # # db_index 0 is for core, it's unchangeable
{% if redis.registry_db_index is defined %}
  registry_db_index: {{ redis.registry_db_index }}
{% else %}
  # # registry_db_index: 1
{% endif %}
{% if redis.jobservice_db_index is defined %}
  jobservice_db_index: {{ redis.jobservice_db_index }}
{% else %}
  # # jobservice_db_index: 2
{% endif %}
{% if redis.trivy_db_index is defined %}
  trivy_db_index: {{ redis.trivy_db_index }}
{% else %}
  # # trivy_db_index: 5
{% endif %}
{% if redis.harbor_db_index is defined %}
  harbor_db_index: {{ redis.harbor_db_index }}
{% else %}
  # # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
  # # harbor_db_index: 6
{% endif %}
{% if redis.cache_layer_db_index is defined %}
  cache_layer_db_index: {{ redis.cache_layer_db_index }}
{% else %}
  # # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
  # # cache_layer_db_index: 7
{% endif %}
{% else %}
# Uncomment redis if need to customize redis db
# redis:
#   # db_index 0 is for core, it's unchangeable
#   # registry_db_index: 1
#   # jobservice_db_index: 2
#   # trivy_db_index: 5
#   # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
#   # harbor_db_index: 6
#   # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
#   # cache_layer_db_index: 7
{% endif %}

{% if external_redis is defined %}
external_redis:
  # support redis, redis+sentinel
  # host for redis: <host_redis>:<port_redis>
  # host for redis+sentinel:
  #   <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
  host: {{ external_redis.host }}
  password: {{ external_redis.password }}
  # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
{% if external_redis.username is defined %}
  username: {{ external_redis.username }}
{% else %}
  # username:
{% endif %}
  # sentinel_master_set must be set to support redis+sentinel
  #sentinel_master_set:
{% if external_redis.tlsOptions is defined %}
  # # tls configuration for redis connection
  # # only server-authentication is supported
  # # mtls for redis connection is not supported
  # # tls connection will be disabled by default
  tlsOptions:
    enable: {{ external_redis.tlsOptions.enable }}
    # if it is a self-signed ca, please set the ca path specifically.
{% if external_redis.tlsOptions.rootCA is defined %}
    rootCA: {{ external_redis.tlsOptions.rootCA }}
{% else %}
    # rootCA:
{% endif %}
{% else %}
  # # tls configuration for redis connection
  # # only server-authentication is supported
  # # mtls for redis connection is not supported
  # # tls connection will be disabled by default
  # tlsOptions:
  #   enable: false
  #   # if it is a self-signed ca, please set the ca path specifically.
  #   rootCA:
{% endif %}
  # db_index 0 is for core, it's unchangeable
  registry_db_index: {{ external_redis.registry_db_index }}
  jobservice_db_index: {{ external_redis.jobservice_db_index }}
  trivy_db_index: 5
  idle_timeout_seconds: 30
{% if external_redis.harbor_db_index is defined %}
  harbor_db_index: {{ external_redis.harbor_db_index }}
{% else %}
  # # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
  # # harbor_db_index: 6
{% endif %}
{% if external_redis.cache_layer_db_index is defined %}
  cache_layer_db_index: {{ external_redis.cache_layer_db_index }}
{% else %}
  # # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
  # # cache_layer_db_index: 7
{% endif %}
{% else %}
# Uncomment external_redis if using an external Redis server
# external_redis:
#   # support redis, redis+sentinel
#   # host for redis: <host_redis>:<port_redis>
#   # host for redis+sentinel:
#   #   <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
#   host: redis:6379
#   password:
#   # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
#   # username:
#   # sentinel_master_set must be set to support redis+sentinel
#   #sentinel_master_set:
#   # tls configuration for redis connection
#   # only server-authentication is supported
#   # mtls for redis connection is not supported
#   # tls connection will be disabled by default
#   tlsOptions:
#     enable: false
#     # if it is a self-signed ca, please set the ca path specifically.
#     rootCA:
#   # db_index 0 is for core, it's unchangeable
#   registry_db_index: 1
#   jobservice_db_index: 2
#   trivy_db_index: 5
#   idle_timeout_seconds: 30
#   # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
#   # harbor_db_index: 6
#   # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
#   # cache_layer_db_index: 7
{% endif %}

{% if uaa is defined %}
# Uncomment uaa for trusting the certificate of a uaa instance that is hosted via self-signed cert.
uaa:
  ca_file: {{ uaa.ca_file }}
{% else %}
# Uncomment uaa for trusting the certificate of a uaa instance that is hosted via self-signed cert.
# uaa:
#   ca_file: /path/to/ca
{% endif %}

# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components don't need to connect to each other via http proxy.
# Remove a component from the `components` array if you want to disable the proxy
# for it. If you want to use the proxy for replication, you MUST enable the proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add a domain to the `no_proxy` field when you want to disable the proxy
# for some special registry.
{% if proxy is defined %}
proxy:
  http_proxy: {{ proxy.http_proxy or '' }}
  https_proxy: {{ proxy.https_proxy or '' }}
  no_proxy: {{ proxy.no_proxy or '' }}
{% if proxy.components is defined %}
  components:
{% for component in proxy.components %}
{% if component != 'clair' %}
    - {{ component }}
{% endif %}
{% endfor %}
{% endif %}
{% else %}
proxy:
  http_proxy:
  https_proxy:
  no_proxy:
  components:
    - core
    - jobservice
    - trivy
{% endif %}

{% if metric is defined %}
metric:
  enabled: {{ metric.enabled }}
  port: {{ metric.port }}
  path: {{ metric.path }}
{% else %}
# metric:
#   enabled: false
#   port: 9090
#   path: /metrics
{% endif %}

# Trace related config
# Only one trace provider (jaeger or otel) can be enabled at the same time,
# and when using jaeger as the provider, it can only be enabled in agent mode or collector mode.
# If using jaeger collector mode, uncomment endpoint and uncomment username, password if needed.
# If using jaeger agent mode, uncomment agent_host and agent_port.
{% if trace is defined %}
trace:
  enabled: {{ trace.enabled | lower }}
  sample_rate: {{ trace.sample_rate }}
  # # namespace used to differentiate different harbor services
{% if trace.namespace is defined %}
  namespace: {{ trace.namespace }}
{% else %}
  # namespace:
{% endif %}
  # # attributes is a key value dict containing user defined attributes used to initialize the trace provider
{% if trace.attributes is defined %}
  attributes:
{% for name, value in trace.attributes.items() %}
    {{ name }}: {{ value }}
{% endfor %}
{% else %}
  # attributes:
  #   application: harbor
{% endif %}
{% if trace.jaeger is defined %}
  jaeger:
    endpoint: {{ trace.jaeger.endpoint or '' }}
    username: {{ trace.jaeger.username or '' }}
    password: {{ trace.jaeger.password or '' }}
    agent_host: {{ trace.jaeger.agent_host or '' }}
    agent_port: {{ trace.jaeger.agent_port or '' }}
{% else %}
  # jaeger:
  #   endpoint:
  #   username:
  #   password:
  #   agent_host:
  #   agent_port:
{% endif %}
{% if trace.otel is defined %}
  otel:
    endpoint: {{ trace.otel.endpoint or '' }}
    url_path: {{ trace.otel.url_path or '' }}
    compression: {{ trace.otel.compression | lower }}
    insecure: {{ trace.otel.insecure | lower }}
    timeout: {{ trace.otel.timeout or '' }}
{% else %}
  # otel:
  #   endpoint: hostname:4318
  #   url_path: /v1/traces
  #   compression: false
  #   insecure: true
  #   # timeout is in seconds
  #   timeout: 10
{% endif %}
{% else %}
# trace:
#   enabled: true
#   # set sample_rate to 1 if you want to sample 100% of trace data; set 0.5 if you want to sample 50% of trace data, and so forth
#   sample_rate: 1
#   # # namespace used to differentiate different harbor services
#   # namespace:
#   # # attributes is a key value dict containing user defined attributes used to initialize the trace provider
#   # attributes:
#   #   application: harbor
#   # jaeger:
#   #   endpoint: http://hostname:14268/api/traces
#   #   username:
#   #   password:
#   #   agent_host: hostname
#   #   agent_port: 6831
#   # otel:
#   #   endpoint: hostname:4318
#   #   url_path: /v1/traces
#   #   compression: false
#   #   insecure: true
#   #   # timeout is in seconds
#   #   timeout: 10
{% endif %}

# Enable purging the _upload directories
{% if upload_purging is defined %}
upload_purging:
  enabled: {{ upload_purging.enabled | lower }}
  age: {{ upload_purging.age }}
  interval: {{ upload_purging.interval }}
  dryrun: {{ upload_purging.dryrun | lower }}
{% else %}
upload_purging:
  enabled: true
  # remove files in _upload directories which have existed for a period of time, default is one week.
  age: 168h
  # the interval of the purge operations
  interval: 24h
  dryrun: false
{% endif %}

# Cache layer related config
{% if cache is defined %}
cache:
  enabled: {{ cache.enabled | lower }}
  expire_hours: {{ cache.expire_hours }}
{% else %}
cache:
  enabled: false
  expire_hours: 24
{% endif %}

# Harbor core configurations
# Uncomment to enable the following harbor core related configuration items.
{% if core is defined %}
core:
  # The provider for updating project quota (usage). There are 2 options, redis or db.
  # By default it is implemented by db, but you can switch the update to redis, which
  # can improve the performance of highly concurrent pushes to the same project
  # and reduce database connection spikes and occupancy.
  # Redis brings some delay to the display of quota usage, so only switch the
  # provider to redis if you run into db connection spikes around highly concurrent
  # pushes to the same project; there is no improvement for other scenarios.
  quota_update_provider: {{ core.quota_update_provider }}
{% else %}
# core:
#   # The provider for updating project quota (usage). There are 2 options, redis or db.
#   # By default it is implemented by db, but you can switch the update to redis, which
#   # can improve the performance of highly concurrent pushes to the same project
#   # and reduce database connection spikes and occupancy.
#   # Redis brings some delay to the display of quota usage, so only switch the
#   # provider to redis if you run into db connection spikes around highly concurrent
#   # pushes to the same project; there is no improvement for other scenarios.
#   quota_update_provider: redis # Or db
{% endif %}

@@ -4,7 +4,6 @@ _REDIS_URL_CORE={{redis_url_core}}
 {% if redis_url_harbor %}
 _REDIS_URL_HARBOR={{redis_url_harbor}}
 {% endif %}
-SYNC_QUOTA=true
 _REDIS_URL_REG={{redis_url_reg}}

 LOG_LEVEL={{log_level}}

@@ -40,7 +39,8 @@ REGISTRY_CREDENTIAL_USERNAME={{registry_username}}
 REGISTRY_CREDENTIAL_PASSWORD={{registry_password}}
 CSRF_KEY={{csrf_key}}
 ROBOT_SCANNER_NAME_PREFIX={{scan_robot_prefix}}
-PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory
+PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,docker-registry,github-ghcr,jfrog-artifactory
+REPLICATION_ADAPTER_WHITELIST=ali-acr,aws-ecr,azure-acr,docker-hub,docker-registry,github-ghcr,google-gcr,harbor,huawei-SWR,jfrog-artifactory,tencent-tcr,volcengine-cr

 HTTP_PROXY={{core_http_proxy}}
 HTTPS_PROXY={{core_https_proxy}}

@@ -96,4 +96,4 @@ CACHE_EXPIRE_HOURS={{ cache.expire_hours }}

 {% if core.quota_update_provider %}
 QUOTA_UPDATE_PROVIDER={{ core.quota_update_provider }}
 {% endif %}
 {% endif %}

@@ -6,6 +6,8 @@ REGISTRY_CONTROLLER_URL={{registry_controller_url}}
 JOBSERVICE_WEBHOOK_JOB_MAX_RETRY={{notification_webhook_job_max_retry}}
 JOBSERVICE_WEBHOOK_JOB_HTTP_CLIENT_TIMEOUT={{notification_webhook_job_http_client_timeout}}
+
+LOG_LEVEL={{log_level}}

 {%if internal_tls.enabled %}
 INTERNAL_TLS_ENABLED=true
 INTERNAL_TLS_TRUST_CA_PATH=/harbor_cust_cert/harbor_internal_ca.crt

@@ -21,7 +23,6 @@ HTTPS_PROXY={{jobservice_https_proxy}}
 NO_PROXY={{jobservice_no_proxy}}
 REGISTRY_CREDENTIAL_USERNAME={{registry_username}}
 REGISTRY_CREDENTIAL_PASSWORD={{registry_password}}
-MAX_JOB_DURATION_SECONDS={{max_job_duration_seconds}}

 {% if metric.enabled %}
 METRIC_NAMESPACE=harbor

@@ -227,7 +227,6 @@ def parse_yaml_config(config_file_path, with_trivy):
         value = config_dict["max_job_duration_hours"]
         if not isinstance(value, int) or value < 24:
             config_dict["max_job_duration_hours"] = 24
-        config_dict['max_job_duration_seconds'] = config_dict['max_job_duration_hours'] * 3600
     config_dict['job_loggers'] = js_config["job_loggers"]
     config_dict['logger_sweeper_duration'] = js_config["logger_sweeper_duration"]
     config_dict['jobservice_secret'] = generate_random_string(16)

@@ -34,7 +34,6 @@ def prepare_job_service(config_dict):
         internal_tls=config_dict['internal_tls'],
         max_job_workers=config_dict['max_job_workers'],
         max_job_duration_hours=config_dict['max_job_duration_hours'],
-        max_job_duration_seconds=config_dict['max_job_duration_seconds'],
         job_loggers=config_dict['job_loggers'],
         logger_sweeper_duration=config_dict['logger_sweeper_duration'],
         redis_url=config_dict['redis_url_js'],

@@ -1,4 +1,5 @@
-FROM golang:1.23.8
+ARG golang_image
+FROM ${golang_image}

 ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
 ENV BUILDTAGS include_oss include_gcs

@@ -14,6 +14,8 @@ fi

 VERSION="$1"
 DISTRIBUTION_SRC="$2"
+GOBUILDIMAGE="$3"
+DOCKERNETWORK="$4"

 set -e

@@ -32,7 +34,7 @@ cd $cur

 echo 'build the registry binary ...'
 cp Dockerfile.binary $TEMP
-docker build -f $TEMP/Dockerfile.binary -t registry-golang $TEMP
+docker build --network=$DOCKERNETWORK --build-arg golang_image=$GOBUILDIMAGE -f $TEMP/Dockerfile.binary -t registry-golang $TEMP

 echo 'copy the registry binary to local...'
 ID=$(docker create registry-golang)

@@ -1,4 +1,5 @@
-FROM golang:1.23.8
+ARG golang_image
+FROM ${golang_image}

 ADD . /go/src/github.com/goharbor/harbor-scanner-trivy/
 WORKDIR /go/src/github.com/goharbor/harbor-scanner-trivy/

@@ -8,6 +8,8 @@ if [ -z $1 ]; then
 fi

 VERSION="$1"
+GOBUILDIMAGE="$2"
+DOCKERNETWORK="$3"

 set -e

@@ -19,9 +21,9 @@ TEMP=$(mktemp -d ${TMPDIR-/tmp}/trivy-adapter.XXXXXX)
 git clone https://github.com/goharbor/harbor-scanner-trivy.git $TEMP
 cd $TEMP; git checkout $VERSION; cd -

-echo "Building Trivy adapter binary based on golang:1.23.8..."
+echo "Building Trivy adapter binary ..."
 cp Dockerfile.binary $TEMP
-docker build -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP
+docker build --network=$DOCKERNETWORK --build-arg golang_image=$GOBUILDIMAGE -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP

 echo "Copying Trivy adapter binary from the container to the local directory..."
 ID=$(docker create trivy-adapter-golang)

@@ -1,76 +1,56 @@
-linters-settings:
-  gofmt:
-    # Simplify code: gofmt with `-s` option.
-    # Default: true
-    simplify: false
-  misspell:
-    locale: US,UK
-  goimports:
-    local-prefixes: github.com/goharbor/harbor
-  stylecheck:
-    checks: [
-      "ST1019", # Importing the same package multiple times.
-    ]
-  goheader:
-    template-path: copyright.tmpl
-
-linters:
-  disable-all: true
-  enable:
-    - gofmt
-    - goheader
-    - misspell
-    - typecheck
-    # - dogsled
-    # - dupl
-    # - depguard
-    # - funlen
-    # - goconst
-    # - gocritic
-    # - gocyclo
-    # - goimports
-    # - goprintffuncname
-    - ineffassign
-    # - nakedret
-    # - nolintlint
-    - revive
-    - whitespace
-    - bodyclose
-    - errcheck
-    # - gosec
-    - gosimple
-    - goimports
-    - govet
-    # - noctx
-    # - rowserrcheck
-    - staticcheck
-    - stylecheck
-    # - unconvert
-    # - unparam
-    # - unused // disabled due to too many false positive check and limited support golang 1.19 https://github.com/dominikh/go-tools/issues/1282
-
-run:
-  skip-files:
-    - ".*_test.go"
-    - ".*test.go"
-  skip-dirs:
-    - "testing"
-  timeout: 20m
-
-issue:
-  max-same-issues: 0
-  max-per-linter: 0
-
-issues:
-  # Excluding configuration per-path, per-linter, per-text and per-source
-  exclude-rules:
-    # Exclude some linters from running on tests files.
-    - path: _test\.go
-      linters:
-        - goimports
-    - path: src/testing/*.go
-      linters:
-        - goimports
-    - path: src/jobservice/mgt/mock_manager.go
-      linters:
-        - goimports
-        - whitespace
+version: "2"
+linters:
+  default: none
+  enable:
+    - bodyclose
+    - errcheck
+    - goheader
+    - govet
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - whitespace
+  settings:
+    goheader:
+      template-path: copyright.tmpl
+    misspell:
+      locale: US,UK
+    staticcheck:
+      checks:
+        - ST1019
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+      - .*_test\.go
+      - .*test\.go
+      - testing
+      - src/jobservice/mgt/mock_manager.go
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    gofmt:
+      simplify: false
+    goimports:
+      local-prefixes:
+        - github.com/goharbor/harbor
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+      - .*_test\.go
+      - .*test\.go
+      - testing
+      - src/jobservice/mgt/mock_manager.go

@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"net/http"
 	"strconv"
+	"strings"

 	"github.com/beego/beego/v2/core/validation"
 	"github.com/beego/beego/v2/server/web"

@@ -78,7 +79,7 @@ func (b *BaseAPI) RenderError(code int, text string) {
 }

 // DecodeJSONReq decodes a json request
-func (b *BaseAPI) DecodeJSONReq(v interface{}) error {
+func (b *BaseAPI) DecodeJSONReq(v any) error {
 	err := json.Unmarshal(b.Ctx.Input.CopyBody(1<<35), v)
 	if err != nil {
 		log.Errorf("Error while decoding the json request, error: %v, %v",

@@ -89,7 +90,7 @@ func (b *BaseAPI) DecodeJSONReq(v interface{}) error {
 }

 // Validate validates v if it implements interface validation.ValidFormer
-func (b *BaseAPI) Validate(v interface{}) (bool, error) {
+func (b *BaseAPI) Validate(v any) (bool, error) {
 	validator := validation.Validation{}
 	isValid, err := validator.Valid(v)
 	if err != nil {

@ -98,17 +99,17 @@ func (b *BaseAPI) Validate(v interface{}) (bool, error) {
|
|||
}
|
||||
|
||||
if !isValid {
|
||||
message := ""
|
||||
var message strings.Builder
|
||||
for _, e := range validator.Errors {
|
||||
message += fmt.Sprintf("%s %s \n", e.Field, e.Message)
|
||||
message.WriteString(fmt.Sprintf("%s %s \n", e.Field, e.Message))
|
||||
}
|
||||
return false, errors.New(message)
|
||||
return false, errors.New(message.String())
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// DecodeJSONReqAndValidate does both decoding and validation
|
||||
func (b *BaseAPI) DecodeJSONReqAndValidate(v interface{}) (bool, error) {
|
||||
func (b *BaseAPI) DecodeJSONReqAndValidate(v any) (bool, error) {
|
||||
if err := b.DecodeJSONReq(v); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
|
|
|||
|
|
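Two modernizations recur throughout this diff, both visible in the BaseAPI hunks above: `interface{}` is replaced by its alias `any` (predeclared since Go 1.18), and string concatenation inside a loop is replaced by `strings.Builder`. A minimal standalone sketch of both (illustrative code, not part of Harbor):

package main

import (
	"fmt"
	"strings"
)

// Since Go 1.18, "any" is a predeclared alias for interface{},
// so changing a signature from interface{} to any is purely cosmetic.
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	// strings.Builder grows a single buffer, avoiding the quadratic
	// allocation cost of message += ... inside a loop.
	var message strings.Builder
	for _, field := range []string{"name", "email"} {
		message.WriteString(fmt.Sprintf("%s is required\n", field))
	}
	fmt.Print(message.String())
	fmt.Println(describe(42))
}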
@@ -252,4 +252,7 @@ const (

// Global Leeway used for token validation
JwtLeeway = 60 * time.Second

// The replication adapter whitelist
ReplicationAdapterWhiteList = "REPLICATION_ADAPTER_WHITELIST"
)

@@ -144,6 +144,6 @@ func (l *mLogger) Verbose() bool {
}

// Printf ...
func (l *mLogger) Printf(format string, v ...interface{}) {
func (l *mLogger) Printf(format string, v ...any) {
l.logger.Infof(format, v...)
}

@@ -29,7 +29,7 @@ import (

var testCtx context.Context

func execUpdate(o orm.TxOrmer, sql string, params ...interface{}) error {
func execUpdate(o orm.TxOrmer, sql string, params ...any) error {
p, err := o.Raw(sql).Prepare()
if err != nil {
return err

@@ -27,7 +27,7 @@ func TestMaxOpenConns(t *testing.T) {

queryNum := 200
results := make([]bool, queryNum)
for i := 0; i < queryNum; i++ {
for i := range queryNum {
wg.Add(1)
go func(i int) {
defer wg.Done()

@@ -142,7 +142,7 @@ func ArrayEqual(arrayA, arrayB []int) bool {
return false
}
size := len(arrayA)
for i := 0; i < size; i++ {
for i := range size {
if arrayA[i] != arrayB[i] {
return false
}
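The loop rewrites above (`for i := 0; i < queryNum; i++` → `for i := range queryNum`) use Go 1.22's range-over-integer form, which yields 0 through n-1. A short illustrative sketch (not Harbor code):

package main

import "fmt"

func main() {
	const queryNum = 3
	// Go 1.22+: ranging over an integer counts from 0 to queryNum-1.
	for i := range queryNum {
		fmt.Println("query", i)
	}
	// When the index is unused, it can be dropped entirely.
	for range queryNum {
		fmt.Println("tick")
	}
}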
@@ -69,7 +69,7 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
}

// Get ...
func (c *Client) Get(url string, v ...interface{}) error {
func (c *Client) Get(url string, v ...any) error {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return err

@@ -98,7 +98,7 @@ func (c *Client) Head(url string) error {
}

// Post ...
func (c *Client) Post(url string, v ...interface{}) error {
func (c *Client) Post(url string, v ...any) error {
var reader io.Reader
if len(v) > 0 {
if r, ok := v[0].(io.Reader); ok {

@@ -123,7 +123,7 @@ func (c *Client) Post(url string, v ...interface{}) error {
}

// Put ...
func (c *Client) Put(url string, v ...interface{}) error {
func (c *Client) Put(url string, v ...any) error {
var reader io.Reader
if len(v) > 0 {
data, err := json.Marshal(v[0])

@@ -176,7 +176,7 @@ func (c *Client) do(req *http.Request) ([]byte, error) {

// GetAndIteratePagination iterates the pagination header and returns all resources
// The parameter "v" must be a pointer to a slice
func (c *Client) GetAndIteratePagination(endpoint string, v interface{}) error {
func (c *Client) GetAndIteratePagination(endpoint string, v any) error {
url, err := url.Parse(endpoint)
if err != nil {
return err

@@ -15,7 +15,7 @@
package models

// Parameters for job execution.
type Parameters map[string]interface{}
type Parameters map[string]any

// JobRequest is the request of launching a job.
type JobRequest struct {

@@ -96,5 +96,5 @@ type JobStatusChange struct {
// Message is designed for sub/pub messages
type Message struct {
Event string
Data interface{} // generic format
Data any // generic format
}

@@ -119,7 +119,7 @@ func BenchmarkProjectEvaluator(b *testing.B) {
resource := NewNamespace(public.ProjectID).Resource(rbac.ResourceRepository)

b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
evaluator.HasPermission(context.TODO(), resource, rbac.ActionPull)
}
}
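`for i := 0; i < b.N; i++` becomes `for b.Loop()` above, the benchmark loop introduced in Go 1.24's testing package; it manages the iteration count itself and keeps setup before the loop out of the timed region, which is why an explicit `b.ResetTimer()` is usually no longer needed. A hedged sketch (the `work` function is invented for illustration):

package bench

import "testing"

// work is a stand-in for the code under measurement.
func work() int {
	s := 0
	for i := range 1000 {
		s += i
	}
	return s
}

// Go 1.24+: b.Loop() replaces the classic b.N loop.
func BenchmarkWork(b *testing.B) {
	for b.Loop() {
		work()
	}
}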
@@ -43,7 +43,7 @@ func (ns *projectNamespace) Resource(subresources ...types.Resource) types.Resou
return types.Resource(fmt.Sprintf("/project/%d", ns.projectID)).Subresource(subresources...)
}

func (ns *projectNamespace) Identity() interface{} {
func (ns *projectNamespace) Identity() any {
return ns.projectID
}

@@ -162,6 +162,7 @@ var (
{Resource: rbac.ResourceRobot, Action: rbac.ActionRead},
{Resource: rbac.ResourceRobot, Action: rbac.ActionList},

{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead},
{Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList},

{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},

@@ -38,7 +38,7 @@ func (ns *systemNamespace) Resource(subresources ...types.Resource) types.Resour
return types.Resource("/system/").Subresource(subresources...)
}

func (ns *systemNamespace) Identity() interface{} {
func (ns *systemNamespace) Identity() any {
return nil
}

@@ -31,8 +31,8 @@ func FromRequest(req *http.Request) string {
return ""
}
auth := req.Header.Get("Authorization")
if strings.HasPrefix(auth, HeaderPrefix) {
return strings.TrimPrefix(auth, HeaderPrefix)
if after, ok := strings.CutPrefix(auth, HeaderPrefix); ok {
return after
}
return ""
}
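The HasPrefix/TrimPrefix pair above collapses into `strings.CutPrefix` (Go 1.20), which tests for the prefix and returns the remainder in a single call. A minimal sketch (the prefix constant is invented):

package main

import (
	"fmt"
	"strings"
)

const headerPrefix = "Bearer "

// token returns the credential after the prefix, or "" when the
// prefix is absent; one CutPrefix call replaces HasPrefix+TrimPrefix.
func token(auth string) string {
	if after, ok := strings.CutPrefix(auth, headerPrefix); ok {
		return after
	}
	return ""
}

func main() {
	fmt.Println(token("Bearer abc123")) // abc123
	fmt.Println(token("Basic xyz"))     // (empty)
}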
@@ -72,7 +72,7 @@ func (s *SecurityContext) IsSolutionUser() bool {

// Can returns true only when requesting pull/push operation against the specific project
func (s *SecurityContext) Can(ctx context.Context, action types.Action, resource types.Resource) bool {
if !(action == rbac.ActionPull || action == rbac.ActionPush) {
if !(action == rbac.ActionPull || action == rbac.ActionPush || action == rbac.ActionDelete) {
log.Debugf("unauthorized for action %s", action)
return false
}

@@ -63,8 +63,8 @@ func (p *proxyCacheSecretTestSuite) TestIsSolutionUser() {
}

func (p *proxyCacheSecretTestSuite) TestCan() {
// the action isn't pull/push
action := rbac.ActionDelete
// the action isn't pull/push/delete
action := rbac.ActionUpdate
resource := project.NewNamespace(1).Resource(rbac.ResourceRepository)
p.False(p.sc.Can(context.TODO(), action, resource))

@@ -63,7 +63,7 @@ func (t *tokenSecurityCtx) GetMyProjects() ([]*models.Project, error) {
return []*models.Project{}, nil
}

func (t *tokenSecurityCtx) GetProjectRoles(_ interface{}) []int {
func (t *tokenSecurityCtx) GetProjectRoles(_ any) []int {
return []int{}
}

@@ -18,7 +18,7 @@ import (
"github.com/goharbor/harbor/src/common"
)

var defaultConfig = map[string]interface{}{
var defaultConfig = map[string]any{
common.ExtEndpoint: "https://host01.com",
common.AUTHMode: common.DBAuth,
common.DatabaseType: "postgresql",

@@ -66,6 +66,6 @@ var defaultConfig = map[string]interface{}{
}

// GetDefaultConfigMap returns the default config map for easier modification.
func GetDefaultConfigMap() map[string]interface{} {
func GetDefaultConfigMap() map[string]any {
return defaultConfig
}

@@ -30,7 +30,7 @@ type GCResult struct {
}

// NewRegistryCtl returns a mock registry server
func NewRegistryCtl(_ map[string]interface{}) (*httptest.Server, error) {
func NewRegistryCtl(_ map[string]any) (*httptest.Server, error) {
m := []*RequestHandlerMapping{}

gcr := GCResult{true, "hello-world", time.Now(), time.Now()}

@@ -94,9 +94,9 @@ func NewServer(mappings ...*RequestHandlerMapping) *httptest.Server {
}

// GetUnitTestConfig ...
func GetUnitTestConfig() map[string]interface{} {
func GetUnitTestConfig() map[string]any {
ipAddress := os.Getenv("IP")
return map[string]interface{}{
return map[string]any{
common.ExtEndpoint: fmt.Sprintf("https://%s", ipAddress),
common.AUTHMode: "db_auth",
common.DatabaseType: "postgresql",

@@ -130,7 +130,7 @@ func GetUnitTestConfig() map[string]interface{} {
}

// TraceCfgMap ...
func TraceCfgMap(cfgs map[string]interface{}) {
func TraceCfgMap(cfgs map[string]any) {
var keys []string
for k := range cfgs {
keys = append(keys, k)

@@ -89,7 +89,7 @@ type SearchUserEntry struct {
ExtID string `json:"externalId"`
UserName string `json:"userName"`
Emails []SearchUserEmailEntry `json:"emails"`
Groups []interface{}
Groups []any
}

// SearchUserRes is the struct to parse the result of search user API of UAA

@@ -75,7 +75,7 @@ func GenerateRandomStringWithLen(length int) string {
if err != nil {
log.Warningf("Error reading random bytes: %v", err)
}
for i := 0; i < length; i++ {
for i := range length {
result[i] = chars[int(result[i])%l]
}
return string(result)

@@ -140,7 +140,7 @@ func ParseTimeStamp(timestamp string) (*time.Time, error) {
}

// ConvertMapToStruct is used to fill the specified struct with map.
func ConvertMapToStruct(object interface{}, values interface{}) error {
func ConvertMapToStruct(object any, values any) error {
if object == nil {
return errors.New("nil struct is not supported")
}

@@ -158,7 +158,7 @@ func ConvertMapToStruct(object interface{}, values interface{}) error {
}

// ParseProjectIDOrName parses value to ID(int64) or name(string)
func ParseProjectIDOrName(value interface{}) (int64, string, error) {
func ParseProjectIDOrName(value any) (int64, string, error) {
if value == nil {
return 0, "", errors.New("harborIDOrName is nil")
}

@@ -177,7 +177,7 @@ func ParseProjectIDOrName(value interface{}) (int64, string, error) {
}

// SafeCastString -- cast an object to string safely
func SafeCastString(value interface{}) string {
func SafeCastString(value any) string {
if result, ok := value.(string); ok {
return result
}

@@ -185,7 +185,7 @@ func SafeCastString(value interface{}) string {
}

// SafeCastInt --
func SafeCastInt(value interface{}) int {
func SafeCastInt(value any) int {
if result, ok := value.(int); ok {
return result
}

@@ -193,7 +193,7 @@ func SafeCastInt(value interface{}) int {
}

// SafeCastBool --
func SafeCastBool(value interface{}) bool {
func SafeCastBool(value any) bool {
if result, ok := value.(bool); ok {
return result
}

@@ -201,7 +201,7 @@ func SafeCastBool(value interface{}) bool {
}

// SafeCastFloat64 --
func SafeCastFloat64(value interface{}) float64 {
func SafeCastFloat64(value any) float64 {
if result, ok := value.(float64); ok {
return result
}

@@ -214,9 +214,9 @@ func TrimLower(str string) string {
}

// GetStrValueOfAnyType return string format of any value, for map, need to convert to json
func GetStrValueOfAnyType(value interface{}) string {
func GetStrValueOfAnyType(value any) string {
var strVal string
if _, ok := value.(map[string]interface{}); ok {
if _, ok := value.(map[string]any); ok {
b, err := json.Marshal(value)
if err != nil {
log.Errorf("can not marshal json object, error %v", err)

@@ -237,18 +237,18 @@ func GetStrValueOfAnyType(value interface{}) string {
}

// IsIllegalLength ...
func IsIllegalLength(s string, min int, max int) bool {
if min == -1 {
return (len(s) > max)
func IsIllegalLength(s string, minVal int, maxVal int) bool {
if minVal == -1 {
return (len(s) > maxVal)
}
if max == -1 {
return (len(s) <= min)
if maxVal == -1 {
return (len(s) <= minVal)
}
return (len(s) < min || len(s) > max)
return (len(s) < minVal || len(s) > maxVal)
}

// ParseJSONInt ...
func ParseJSONInt(value interface{}) (int, bool) {
func ParseJSONInt(value any) (int, bool) {
switch v := value.(type) {
case float64:
return int(v), true

@@ -337,13 +337,3 @@ func MostMatchSorter(a, b string, matchWord string) bool {
func IsLocalPath(path string) bool {
return len(path) == 0 || (strings.HasPrefix(path, "/") && !strings.HasPrefix(path, "//"))
}

// StringInSlice check if the string is in the slice
func StringInSlice(str string, slice []string) bool {
for _, s := range slice {
if s == str {
return true
}
}
return false
}
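Renaming IsIllegalLength's `min`/`max` parameters to `minVal`/`maxVal` above avoids shadowing the predeclared `min` and `max` functions added in Go 1.21. A small sketch of why the rename matters (illustrative, not Harbor code):

package main

import "fmt"

// With minVal/maxVal as parameter names, the Go 1.21 builtins
// min and max remain usable inside the function body.
func clampLen(s string, minVal, maxVal int) int {
	return min(max(len(s), minVal), maxVal)
}

func main() {
	fmt.Println(min(3, 7), max(3, 7)) // 3 7
	fmt.Println(clampLen("harbor", 1, 4)) // 4
}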
@@ -216,7 +216,7 @@ type testingStruct struct {
}

func TestConvertMapToStruct(t *testing.T) {
dataMap := make(map[string]interface{})
dataMap := make(map[string]any)
dataMap["Name"] = "testing"
dataMap["Count"] = 100

@@ -232,7 +232,7 @@ func TestConvertMapToStruct(t *testing.T) {

func TestSafeCastString(t *testing.T) {
type args struct {
value interface{}
value any
}
tests := []struct {
name string

@@ -254,7 +254,7 @@ func TestSafeCastString(t *testing.T) {

func TestSafeCastBool(t *testing.T) {
type args struct {
value interface{}
value any
}
tests := []struct {
name string

@@ -276,7 +276,7 @@ func TestSafeCastBool(t *testing.T) {

func TestSafeCastInt(t *testing.T) {
type args struct {
value interface{}
value any
}
tests := []struct {
name string

@@ -298,7 +298,7 @@ func TestSafeCastInt(t *testing.T) {

func TestSafeCastFloat64(t *testing.T) {
type args struct {
value interface{}
value any
}
tests := []struct {
name string

@@ -342,7 +342,7 @@ func TestTrimLower(t *testing.T) {

func TestGetStrValueOfAnyType(t *testing.T) {
type args struct {
value interface{}
value any
}
tests := []struct {
name string

@@ -357,7 +357,7 @@ func TestGetStrValueOfAnyType(t *testing.T) {
{"string", args{"hello world"}, "hello world"},
{"bool", args{true}, "true"},
{"bool", args{false}, "false"},
{"map", args{map[string]interface{}{"key1": "value1"}}, "{\"key1\":\"value1\"}"},
{"map", args{map[string]any{"key1": "value1"}}, "{\"key1\":\"value1\"}"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

@@ -66,8 +66,7 @@ func parseV1alpha1SkipList(artifact *artifact.Artifact, manifest *v1.Manifest) {
skipListAnnotationKey := fmt.Sprintf("%s.%s.%s", AnnotationPrefix, V1alpha1, SkipList)
skipList, ok := manifest.Config.Annotations[skipListAnnotationKey]
if ok {
skipKeyList := strings.Split(skipList, ",")
for _, skipKey := range skipKeyList {
for skipKey := range strings.SplitSeq(skipList, ",") {
delete(metadata, skipKey)
}
artifact.ExtraAttrs = metadata
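`strings.SplitSeq` (Go 1.24) returns an iterator over the pieces instead of allocating a `[]string`, so the intermediate `skipKeyList` slice above disappears. A minimal sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	skipList := "created,modified,size"
	// Go 1.24+: SplitSeq yields each substring lazily, which avoids
	// building a slice that would only be ranged over once.
	for key := range strings.SplitSeq(skipList, ",") {
		fmt.Println("skip:", key)
	}
}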
@@ -231,7 +231,7 @@ func (p *v1alpha1TestSuite) TestParse() {
manifestMediaType, content, err := manifest.Payload()
p.Require().Nil(err)

metadata := map[string]interface{}{}
metadata := map[string]any{}
configBlob := io.NopCloser(strings.NewReader(ormbConfig))
err = json.NewDecoder(configBlob).Decode(&metadata)
p.Require().Nil(err)

@@ -244,7 +244,7 @@ func (p *v1alpha1TestSuite) TestParse() {
p.Len(art.ExtraAttrs, 12)
p.Equal("CNN Model", art.ExtraAttrs["description"])
p.Equal("TensorFlow", art.ExtraAttrs["framework"])
p.Equal([]interface{}{map[string]interface{}{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"])
p.Equal([]any{map[string]any{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"])
p.Equal("sha256:d923b93eadde0af5c639a972710a4d919066aba5d0dfbf4b9385099f70272da0", art.Icon)

// ormbManifestWithoutSkipList

@@ -255,7 +255,7 @@ func (p *v1alpha1TestSuite) TestParse() {
manifestMediaType, content, err = manifest.Payload()
p.Require().Nil(err)

metadata = map[string]interface{}{}
metadata = map[string]any{}
configBlob = io.NopCloser(strings.NewReader(ormbConfig))
err = json.NewDecoder(configBlob).Decode(&metadata)
p.Require().Nil(err)

@@ -268,7 +268,7 @@ func (p *v1alpha1TestSuite) TestParse() {
p.Len(art.ExtraAttrs, 13)
p.Equal("CNN Model", art.ExtraAttrs["description"])
p.Equal("TensorFlow", art.ExtraAttrs["framework"])
p.Equal([]interface{}{map[string]interface{}{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"])
p.Equal([]any{map[string]any{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"])
p.Equal("sha256:d923b93eadde0af5c639a972710a4d919066aba5d0dfbf4b9385099f70272da0", art.Icon)

// ormbManifestWithoutIcon

@@ -279,7 +279,7 @@ func (p *v1alpha1TestSuite) TestParse() {
manifestMediaType, content, err = manifest.Payload()
p.Require().Nil(err)

metadata = map[string]interface{}{}
metadata = map[string]any{}
configBlob = io.NopCloser(strings.NewReader(ormbConfig))
err = json.NewDecoder(configBlob).Decode(&metadata)
p.Require().Nil(err)

@@ -290,7 +290,7 @@ func (p *v1alpha1TestSuite) TestParse() {
p.Len(art.ExtraAttrs, 12)
p.Equal("CNN Model", art.ExtraAttrs["description"])
p.Equal("TensorFlow", art.ExtraAttrs["framework"])
p.Equal([]interface{}{map[string]interface{}{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"])
p.Equal([]any{map[string]any{"name": "batch_size", "value": "32"}}, art.ExtraAttrs["hyperparameters"])
p.Equal("", art.Icon)
}

@@ -313,7 +313,7 @@ func (c *controller) getByTag(ctx context.Context, repository, tag string, optio
return nil, err
}
tags, err := c.tagCtl.List(ctx, &q.Query{
Keywords: map[string]interface{}{
Keywords: map[string]any{
"RepositoryID": repo.RepositoryID,
"Name": tag,
},

@@ -356,7 +356,7 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
return nil
}
parents, err := c.artMgr.ListReferences(ctx, &q.Query{
Keywords: map[string]interface{}{
Keywords: map[string]any{
"ChildID": id,
},
})

@@ -385,7 +385,7 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
if acc.IsHard() {
// if this acc artifact has parent(is child), set isRoot to false
parents, err := c.artMgr.ListReferences(ctx, &q.Query{
Keywords: map[string]interface{}{
Keywords: map[string]any{
"ChildID": acc.GetData().ArtifactID,
},
})

@@ -752,7 +752,7 @@ func (c *controller) populateIcon(art *Artifact) {

func (c *controller) populateTags(ctx context.Context, art *Artifact, option *tag.Option) {
tags, err := c.tagCtl.List(ctx, &q.Query{
Keywords: map[string]interface{}{
Keywords: map[string]any{
"artifact_id": art.ID,
},
}, option)

@@ -56,7 +56,7 @@ func (suite *IteratorTestSuite) TeardownSuite() {

func (suite *IteratorTestSuite) TestIterator() {
suite.accMgr.On("List", mock.Anything, mock.Anything).Return([]accessorymodel.Accessory{}, nil)
q1 := &q.Query{PageNumber: 1, PageSize: 5, Keywords: map[string]interface{}{}}
q1 := &q.Query{PageNumber: 1, PageSize: 5, Keywords: map[string]any{}}
suite.artMgr.On("List", mock.Anything, q1).Return([]*artifact.Artifact{
{ID: 1},
{ID: 2},

@@ -65,7 +65,7 @@ func (suite *IteratorTestSuite) TestIterator() {
{ID: 5},
}, nil)

q2 := &q.Query{PageNumber: 2, PageSize: 5, Keywords: map[string]interface{}{}}
q2 := &q.Query{PageNumber: 2, PageSize: 5, Keywords: map[string]any{}}
suite.artMgr.On("List", mock.Anything, q2).Return([]*artifact.Artifact{
{ID: 6},
{ID: 7},

@@ -40,7 +40,7 @@ func (artifact *Artifact) UnmarshalJSON(data []byte) error {
type Alias Artifact
ali := &struct {
*Alias
AccessoryItems []interface{} `json:"accessories,omitempty"`
AccessoryItems []any `json:"accessories,omitempty"`
}{
Alias: (*Alias)(artifact),
}

@@ -44,7 +44,7 @@ type ManifestProcessor struct {
// AbstractMetadata abstracts metadata of artifact
func (m *ManifestProcessor) AbstractMetadata(ctx context.Context, artifact *artifact.Artifact, content []byte) error {
// parse metadata from config layer
metadata := map[string]interface{}{}
metadata := map[string]any{}
if err := m.UnmarshalConfig(ctx, artifact.RepositoryName, content, &metadata); err != nil {
return err
}

@@ -55,7 +55,7 @@ func (m *ManifestProcessor) AbstractMetadata(ctx context.Context, artifact *arti
}

if artifact.ExtraAttrs == nil {
artifact.ExtraAttrs = map[string]interface{}{}
artifact.ExtraAttrs = map[string]any{}
}
for _, property := range m.properties {
artifact.ExtraAttrs[property] = metadata[property]

@@ -80,7 +80,7 @@ func (m *ManifestProcessor) ListAdditionTypes(_ context.Context, _ *artifact.Art
}

// UnmarshalConfig unmarshal the config blob of the artifact into the specified object "v"
func (m *ManifestProcessor) UnmarshalConfig(_ context.Context, repository string, manifest []byte, v interface{}) error {
func (m *ManifestProcessor) UnmarshalConfig(_ context.Context, repository string, manifest []byte, v any) error {
// unmarshal manifest
mani := &v1.Manifest{}
if err := json.Unmarshal(manifest, mani); err != nil {

@@ -89,7 +89,7 @@ func (p *processorTestSuite) TestAbstractAddition() {
Repository: "github.com/goharbor",
},
},
Values: map[string]interface{}{
Values: map[string]any{
"cluster.enable": true,
"cluster.slaveCount": 1,
"image.pullPolicy": "Always",

@@ -17,6 +17,8 @@ package parser
import (
"context"
"fmt"
"io"
"path/filepath"

ocispec "github.com/opencontainers/image-spec/specs-go/v1"

@@ -40,6 +42,11 @@ const (

// defaultFileSizeLimit is the default file size limit.
defaultFileSizeLimit = 1024 * 1024 * 4 // 4MB

// formatTar is the format of tar file.
formatTar = ".tar"
// formatRaw is the format of raw file.
formatRaw = ".raw"
)

// newBase creates a new base parser.

@@ -70,10 +77,23 @@ func (b *base) Parse(_ context.Context, artifact *artifact.Artifact, layer *ocis
}

defer stream.Close()
content, err := untar(stream)

content, err := decodeContent(layer.MediaType, stream)
if err != nil {
return "", nil, fmt.Errorf("failed to untar the content: %w", err)
return "", nil, fmt.Errorf("failed to decode content: %w", err)
}

return contentTypeTextPlain, content, nil
}

func decodeContent(mediaType string, reader io.Reader) ([]byte, error) {
format := filepath.Ext(mediaType)
switch format {
case formatTar:
return untar(reader)
case formatRaw:
return io.ReadAll(reader)
default:
return nil, fmt.Errorf("unsupported format: %s", format)
}
}
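`decodeContent` above selects a decoding strategy from the media type's trailing extension-like suffix: `filepath.Ext` on a string such as `vnd.foo.bar.tar` returns `.tar`. A tiny sketch of that dispatch idea (the media types are invented):

package main

import (
	"fmt"
	"io"
	"path/filepath"
	"strings"
)

// decode dispatches on the final ".xxx" suffix of the media type,
// mirroring the tar/raw switch in decodeContent.
func decode(mediaType string, r io.Reader) ([]byte, error) {
	switch format := filepath.Ext(mediaType); format {
	case ".raw":
		return io.ReadAll(r)
	default:
		return nil, fmt.Errorf("unsupported format: %s", format)
	}
}

func main() {
	b, err := decode("vnd.foo.bar.raw", strings.NewReader("payload"))
	fmt.Println(string(b), err) // payload <nil>
}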
@@ -63,10 +63,11 @@ func TestBaseParse(t *testing.T) {
expectedError: "failed to pull blob from registry: registry error",
},
{
name: "successful parse",
name: "successful parse (tar format)",
artifact: &artifact.Artifact{RepositoryName: "test/repo"},
layer: &v1.Descriptor{
Digest: "sha256:1234",
MediaType: "vnd.foo.bar.tar",
Digest: "sha256:1234",
},
mockSetup: func(m *mock.Client) {
var buf bytes.Buffer

@@ -82,6 +83,34 @@ func TestBaseParse(t *testing.T) {
},
expectedType: contentTypeTextPlain,
},
{
name: "successful parse (raw format)",
artifact: &artifact.Artifact{RepositoryName: "test/repo"},
layer: &v1.Descriptor{
MediaType: "vnd.foo.bar.raw",
Digest: "sha256:1234",
},
mockSetup: func(m *mock.Client) {
var buf bytes.Buffer
buf.Write([]byte("test content"))
m.On("PullBlob", "test/repo", "sha256:1234").Return(int64(0), io.NopCloser(bytes.NewReader(buf.Bytes())), nil)
},
expectedType: contentTypeTextPlain,
},
{
name: "error parse (unsupported format)",
artifact: &artifact.Artifact{RepositoryName: "test/repo"},
layer: &v1.Descriptor{
MediaType: "vnd.foo.bar.unknown",
Digest: "sha256:1234",
},
mockSetup: func(m *mock.Client) {
var buf bytes.Buffer
buf.Write([]byte("test content"))
m.On("PullBlob", "test/repo", "sha256:1234").Return(int64(0), io.NopCloser(bytes.NewReader(buf.Bytes())), nil)
},
expectedError: "failed to decode content: unsupported format: .unknown",
},
}

for _, tt := range tests {

@@ -17,6 +17,7 @@ package parser
import (
"context"
"fmt"
"slices"

modelspec "github.com/CloudNativeAI/model-spec/specs-go/v1"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"

@@ -47,7 +48,10 @@ func (l *license) Parse(ctx context.Context, artifact *artifact.Artifact, manife
// lookup the license file layer
var layer *ocispec.Descriptor
for _, desc := range manifest.Layers {
if desc.MediaType == modelspec.MediaTypeModelDoc {
if slices.Contains([]string{
modelspec.MediaTypeModelDoc,
modelspec.MediaTypeModelDocRaw,
}, desc.MediaType) {
if desc.Annotations != nil {
filepath := desc.Annotations[modelspec.AnnotationFilepath]
if filepath == "LICENSE" || filepath == "LICENSE.txt" {
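The license parser now accepts two media types (the readme parser below gets the same change), so the single `==` comparison grows into `slices.Contains` from the standard `slices` package (Go 1.21). A minimal sketch (the media type strings are invented):

package main

import (
	"fmt"
	"slices"
)

func main() {
	accepted := []string{
		"application/vnd.example.doc",
		"application/vnd.example.doc.raw",
	}
	// slices.Contains (Go 1.21+) replaces a chain of == comparisons
	// or a hand-written membership loop.
	fmt.Println(slices.Contains(accepted, "application/vnd.example.doc.raw")) // true
	fmt.Println(slices.Contains(accepted, "text/plain"))                      // false
}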
@@ -83,6 +83,29 @@ func TestLicenseParser(t *testing.T) {
expectedType: contentTypeTextPlain,
expectedOutput: []byte("MIT License"),
},
{
name: "LICENSE parse success (raw)",
manifest: &ocispec.Manifest{
Layers: []ocispec.Descriptor{
{
MediaType: modelspec.MediaTypeModelDocRaw,
Annotations: map[string]string{
modelspec.AnnotationFilepath: "LICENSE",
},
Digest: "sha256:abc123",
},
},
},
setupMockReg: func(mc *mockregistry.Client) {
var buf bytes.Buffer
buf.Write([]byte("MIT License"))

mc.On("PullBlob", mock.Anything, "sha256:abc123").
Return(int64(buf.Len()), io.NopCloser(bytes.NewReader(buf.Bytes())), nil)
},
expectedType: contentTypeTextPlain,
expectedOutput: []byte("MIT License"),
},
{
name: "LICENSE.txt parse success",
manifest: &ocispec.Manifest{

@@ -17,6 +17,7 @@ package parser
import (
"context"
"fmt"
"slices"

modelspec "github.com/CloudNativeAI/model-spec/specs-go/v1"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"

@@ -47,7 +48,10 @@ func (r *readme) Parse(ctx context.Context, artifact *artifact.Artifact, manifes
// lookup the readme file layer.
var layer *ocispec.Descriptor
for _, desc := range manifest.Layers {
if desc.MediaType == modelspec.MediaTypeModelDoc {
if slices.Contains([]string{
modelspec.MediaTypeModelDoc,
modelspec.MediaTypeModelDocRaw,
}, desc.MediaType) {
if desc.Annotations != nil {
filepath := desc.Annotations[modelspec.AnnotationFilepath]
if filepath == "README" || filepath == "README.md" {

@@ -113,6 +113,29 @@ func TestReadmeParser(t *testing.T) {
expectedType: contentTypeMarkdown,
expectedOutput: []byte("# Test README"),
},
{
name: "README parse success (raw)",
manifest: &ocispec.Manifest{
Layers: []ocispec.Descriptor{
{
MediaType: modelspec.MediaTypeModelDocRaw,
Annotations: map[string]string{
modelspec.AnnotationFilepath: "README",
},
Digest: "sha256:def456",
},
},
},
setupMockReg: func(mc *mockregistry.Client) {
var buf bytes.Buffer
buf.Write([]byte("# Test README"))

mc.On("PullBlob", mock.Anything, "sha256:def456").
Return(int64(buf.Len()), io.NopCloser(bytes.NewReader(buf.Bytes())), nil)
},
expectedType: contentTypeMarkdown,
expectedOutput: []byte("# Test README"),
},
{
name: "registry error",
manifest: &ocispec.Manifest{

@@ -156,7 +156,7 @@ func TestAddNode(t *testing.T) {
// Verify the path exists.
current := root
parts := filepath.Clean(tt.path)
for _, part := range strings.Split(parts, string(filepath.Separator)) {
for part := range strings.SplitSeq(parts, string(filepath.Separator)) {
if part == "" {
continue
}

@@ -110,7 +110,7 @@ func (d *defaultProcessor) AbstractMetadata(ctx context.Context, artifact *artif
}
defer blob.Close()
// parse metadata from config layer
metadata := map[string]interface{}{}
metadata := map[string]any{}
if err = json.NewDecoder(blob).Decode(&metadata); err != nil {
return err
}

@@ -268,7 +268,7 @@ func (d *defaultProcessorTestSuite) TestAbstractMetadata() {
manifestMediaType, content, err := manifest.Payload()
d.Require().Nil(err)

metadata := map[string]interface{}{}
metadata := map[string]any{}
configBlob := io.NopCloser(strings.NewReader(ormbConfig))
err = json.NewDecoder(configBlob).Decode(&metadata)
d.Require().Nil(err)

@@ -289,7 +289,7 @@ func (d *defaultProcessorTestSuite) TestAbstractMetadataOfOCIManifesttWithUnknow
d.Require().Nil(err)

configBlob := io.NopCloser(strings.NewReader(UnknownJsonConfig))
metadata := map[string]interface{}{}
metadata := map[string]any{}
err = json.NewDecoder(configBlob).Decode(&metadata)
d.Require().Nil(err)

@@ -44,7 +44,7 @@ func (m *manifestV1Processor) AbstractMetadata(_ context.Context, artifact *arti
return err
}
if artifact.ExtraAttrs == nil {
artifact.ExtraAttrs = map[string]interface{}{}
artifact.ExtraAttrs = map[string]any{}
}
artifact.ExtraAttrs["architecture"] = mani.Architecture
return nil

@@ -59,7 +59,7 @@ func (m *manifestV2Processor) AbstractMetadata(ctx context.Context, artifact *ar
return err
}
if artifact.ExtraAttrs == nil {
artifact.ExtraAttrs = map[string]interface{}{}
artifact.ExtraAttrs = map[string]any{}
}
artifact.ExtraAttrs["created"] = config.Created
artifact.ExtraAttrs["architecture"] = config.Architecture

@@ -62,14 +62,14 @@ type Processor struct {
}

func (m *Processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact, manifestBody []byte) error {
art.ExtraAttrs = map[string]interface{}{}
art.ExtraAttrs = map[string]any{}
manifest := &v1.Manifest{}
if err := json.Unmarshal(manifestBody, manifest); err != nil {
return err
}

if art.ExtraAttrs == nil {
art.ExtraAttrs = map[string]interface{}{}
art.ExtraAttrs = map[string]any{}
}
if manifest.Annotations[AnnotationVariantKey] == AnnotationVariantValue || manifest.Annotations[AnnotationHandlerKey] == AnnotationHandlerValue {
// for annotation way

@@ -225,10 +225,10 @@ func (c *controller) Get(ctx context.Context, digest string, options ...Option)

opts := newOptions(options...)

keywords := make(map[string]interface{})
keywords := make(map[string]any)
if digest != "" {
ol := q.OrList{
Values: []interface{}{
Values: []any{
digest,
},
}

@@ -232,7 +232,7 @@ func (suite *ControllerTestSuite) TestGet() {

func (suite *ControllerTestSuite) TestSync() {
var references []distribution.Descriptor
for i := 0; i < 5; i++ {
for i := range 5 {
references = append(references, distribution.Descriptor{
MediaType: fmt.Sprintf("media type %d", i),
Digest: suite.Digest(),

@@ -46,11 +46,11 @@ type Controller interface {
// UserConfigs get the user scope configurations
UserConfigs(ctx context.Context) (map[string]*models.Value, error)
// UpdateUserConfigs update the user scope configurations
UpdateUserConfigs(ctx context.Context, conf map[string]interface{}) error
UpdateUserConfigs(ctx context.Context, conf map[string]any) error
// AllConfigs get all configurations, used by internal, should include the system config items
AllConfigs(ctx context.Context) (map[string]interface{}, error)
AllConfigs(ctx context.Context) (map[string]any, error)
// ConvertForGet - delete sensitive attrs and add editable field to every attr
ConvertForGet(ctx context.Context, cfg map[string]interface{}, internal bool) (map[string]*models.Value, error)
ConvertForGet(ctx context.Context, cfg map[string]any, internal bool) (map[string]*models.Value, error)
// OverwriteConfig overwrite config in the database and set all configure read only when CONFIG_OVERWRITE_JSON is provided
OverwriteConfig(ctx context.Context) error
}

@@ -70,13 +70,13 @@ func (c *controller) UserConfigs(ctx context.Context) (map[string]*models.Value,
return c.ConvertForGet(ctx, configs, false)
}

func (c *controller) AllConfigs(ctx context.Context) (map[string]interface{}, error) {
func (c *controller) AllConfigs(ctx context.Context) (map[string]any, error) {
mgr := config.GetCfgManager(ctx)
configs := mgr.GetAll(ctx)
return configs, nil
}

func (c *controller) UpdateUserConfigs(ctx context.Context, conf map[string]interface{}) error {
func (c *controller) UpdateUserConfigs(ctx context.Context, conf map[string]any) error {
if readOnlyForAll {
return errors.ForbiddenError(nil).WithMessage("current config is init by env variable: CONFIG_OVERWRITE_JSON, it cannot be updated")
}

@@ -97,7 +97,7 @@ func (c *controller) UpdateUserConfigs(ctx context.Context, conf map[string]inte
return c.updateLogEndpoint(ctx, conf)
}

func (c *controller) updateLogEndpoint(ctx context.Context, cfgs map[string]interface{}) error {
func (c *controller) updateLogEndpoint(ctx context.Context, cfgs map[string]any) error {
// check if the audit log forward endpoint updated
if _, ok := cfgs[common.AuditLogForwardEndpoint]; ok {
auditEP := config.AuditLogForwardEndpoint(ctx)

@@ -112,7 +112,7 @@ func (c *controller) updateLogEndpoint(ctx context.Context, cfgs map[string]inte
return nil
}

func (c *controller) validateCfg(ctx context.Context, cfgs map[string]interface{}) error {
func (c *controller) validateCfg(ctx context.Context, cfgs map[string]any) error {
mgr := config.GetCfgManager(ctx)

// check if auth can be modified

@@ -146,7 +146,7 @@ func (c *controller) validateCfg(ctx context.Context, cfgs map[string]interface{
return nil
}

func verifySkipAuditLogCfg(ctx context.Context, cfgs map[string]interface{}, mgr config.Manager) error {
func verifySkipAuditLogCfg(ctx context.Context, cfgs map[string]any, mgr config.Manager) error {
updated := false
endPoint := mgr.Get(ctx, common.AuditLogForwardEndpoint).GetString()
skipAuditDB := mgr.Get(ctx, common.SkipAuditLogDatabase).GetBool()

@@ -169,7 +169,7 @@ func verifySkipAuditLogCfg(ctx context.Context, cfgs map[string]interface{}, mgr
}

// verifyValueLengthCfg verifies the cfgs which need to check the value max length to align with frontend.
func verifyValueLengthCfg(_ context.Context, cfgs map[string]interface{}) error {
func verifyValueLengthCfg(_ context.Context, cfgs map[string]any) error {
maxValue := maxValueLimitedByLength(common.UIMaxLengthLimitedOfNumber)
validateCfgs := []string{
common.TokenExpiration,

@@ -206,7 +206,7 @@ func maxValueLimitedByLength(length int) int64 {
var value int64
// the times for multiple, should *10 for every time
times := 1
for i := 0; i < length; i++ {
for range length {
value = value + int64(9*times)
times = times * 10
}

@@ -217,11 +217,11 @@ func maxValueLimitedByLength(length int) int64 {
// ScanAllPolicy is represent the json request and object for scan all policy
// Only for migrating from the legacy schedule.
type ScanAllPolicy struct {
Type string `json:"type"`
Param map[string]interface{} `json:"parameter,omitempty"`
Type string `json:"type"`
Param map[string]any `json:"parameter,omitempty"`
}

func (c *controller) ConvertForGet(ctx context.Context, cfg map[string]interface{}, internal bool) (map[string]*models.Value, error) {
func (c *controller) ConvertForGet(ctx context.Context, cfg map[string]any, internal bool) (map[string]*models.Value, error) {
result := map[string]*models.Value{}

mList := metadata.Instance().GetAll()

@@ -270,7 +270,7 @@ func (c *controller) ConvertForGet(ctx context.Context, cfg map[string]interface
}

func (c *controller) OverwriteConfig(ctx context.Context) error {
cfgMap := map[string]interface{}{}
cfgMap := map[string]any{}
if v, ok := os.LookupEnv(configOverwriteJSON); ok {
err := json.Unmarshal([]byte(v), &cfgMap)
if err != nil {
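`maxValueLimitedByLength` sums 9, 90, 900, ... so a length of n yields the largest n-digit number, e.g. 3 → 999; the rewrite only swaps the counting loop for `for range length`. A standalone sketch of the same computation (not Harbor code):

package main

import "fmt"

// maxValue mirrors maxValueLimitedByLength: summing 9*10^i for
// i in [0, length) gives the largest integer with `length` digits.
func maxValue(length int) int64 {
	var value int64
	times := int64(1)
	for range length {
		value += 9 * times
		times *= 10
	}
	return value
}

func main() {
	fmt.Println(maxValue(3)) // 999
}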
@@ -33,7 +33,7 @@ func Test_verifySkipAuditLogCfg(t *testing.T) {
Return(&metadata.ConfigureValue{Name: common.SkipAuditLogDatabase, Value: "true"})
type args struct {
ctx context.Context
cfgs map[string]interface{}
cfgs map[string]any
mgr config.Manager
}
tests := []struct {

@@ -42,17 +42,17 @@ func Test_verifySkipAuditLogCfg(t *testing.T) {
wantErr bool
}{
{name: "both configured", args: args{ctx: context.TODO(),
cfgs: map[string]interface{}{common.AuditLogForwardEndpoint: "harbor-log:15041",
cfgs: map[string]any{common.AuditLogForwardEndpoint: "harbor-log:15041",
common.SkipAuditLogDatabase: true},
mgr: cfgManager}, wantErr: false},
{name: "no forward endpoint config", args: args{ctx: context.TODO(),
cfgs: map[string]interface{}{common.SkipAuditLogDatabase: true},
cfgs: map[string]any{common.SkipAuditLogDatabase: true},
mgr: cfgManager}, wantErr: true},
{name: "none configured", args: args{ctx: context.TODO(),
cfgs: map[string]interface{}{},
cfgs: map[string]any{},
mgr: cfgManager}, wantErr: false},
{name: "enabled skip audit log database, but change log forward endpoint to empty", args: args{ctx: context.TODO(),
cfgs: map[string]interface{}{common.AuditLogForwardEndpoint: ""},
cfgs: map[string]any{common.AuditLogForwardEndpoint: ""},
mgr: cfgManager}, wantErr: true},
}
for _, tt := range tests {

@@ -89,24 +89,24 @@ func Test_maxValueLimitedByLength(t *testing.T) {
func Test_verifyValueLengthCfg(t *testing.T) {
type args struct {
ctx context.Context
cfgs map[string]interface{}
cfgs map[string]any
}
tests := []struct {
name string
args args
wantErr bool
}{
{name: "valid config", args: args{context.TODO(), map[string]interface{}{
{name: "valid config", args: args{context.TODO(), map[string]any{
common.TokenExpiration: float64(100),
common.RobotTokenDuration: float64(100),
common.SessionTimeout: float64(100),
}}, wantErr: false},
{name: "invalid config with negative value", args: args{context.TODO(), map[string]interface{}{
{name: "invalid config with negative value", args: args{context.TODO(), map[string]any{
common.TokenExpiration: float64(-1),
common.RobotTokenDuration: float64(100),
common.SessionTimeout: float64(100),
}}, wantErr: true},
{name: "invalid config with value over length limit", args: args{context.TODO(), map[string]interface{}{
{name: "invalid config with value over length limit", args: args{context.TODO(), map[string]any{
common.TokenExpiration: float64(100),
common.RobotTokenDuration: float64(100000000000000000),
common.SessionTimeout: float64(100),

@@ -28,12 +28,12 @@ import (
htesting "github.com/goharbor/harbor/src/testing"
)

var TestDBConfig = map[string]interface{}{
var TestDBConfig = map[string]any{
common.LDAPBaseDN: "dc=example,dc=com",
common.LDAPURL: "ldap.example.com",
}

var TestConfigWithScanAll = map[string]interface{}{
var TestConfigWithScanAll = map[string]any{
"postgresql_host": "localhost",
"postgresql_database": "registry",
"postgresql_password": "root123",

@@ -67,7 +67,7 @@ func (c *controllerTestSuite) TestGetUserCfg() {
}

func (c *controllerTestSuite) TestConvertForGet() {
conf := map[string]interface{}{
conf := map[string]any{
"ldap_url": "ldaps.myexample,com",
"ldap_base_dn": "dc=myexample,dc=com",
"auth_mode": "ldap_auth",

@@ -83,7 +83,7 @@ func (c *controllerTestSuite) TestConvertForGet() {
c.False(exist)

// password type should be sent to internal api call
conf2 := map[string]interface{}{
conf2 := map[string]any{
"ldap_url": "ldaps.myexample,com",
"ldap_base_dn": "dc=myexample,dc=com",
"auth_mode": "ldap_auth",

@@ -109,7 +109,7 @@ func (c *controllerTestSuite) TestGetAll() {

func (c *controllerTestSuite) TestUpdateUserCfg() {

userConf := map[string]interface{}{
userConf := map[string]any{
common.LDAPURL: "ldaps.myexample,com",
common.LDAPBaseDN: "dc=myexample,dc=com",
}

@@ -121,7 +121,7 @@ func (c *controllerTestSuite) TestUpdateUserCfg() {
}
c.Equal("dc=myexample,dc=com", cfgResp["ldap_base_dn"].Val)
c.Equal("ldaps.myexample,com", cfgResp["ldap_url"].Val)
badCfg := map[string]interface{}{
badCfg := map[string]any{
common.LDAPScope: 5,
}
err2 := c.controller.UpdateUserConfigs(ctx, badCfg)

@@ -130,7 +130,7 @@ func (c *controllerTestSuite) TestUpdateUserCfg() {
}

/*func (c *controllerTestSuite) TestCheckUnmodifiable() {
conf := map[string]interface{}{
conf := map[string]any{
"ldap_url": "ldaps.myexample,com",
"ldap_base_dn": "dc=myexample,dc=com",
"auth_mode": "ldap_auth",

@@ -41,7 +41,7 @@ func (h *Handler) Name() string {
}

// Handle ...
func (h *Handler) Handle(ctx context.Context, value interface{}) error {
func (h *Handler) Handle(ctx context.Context, value any) error {
var addAuditLog bool
switch v := value.(type) {
case *event.PushArtifactEvent, *event.DeleteArtifactEvent,

@@ -99,7 +99,7 @@ func (a *ArtifactEventHandler) Name() string {
}

// Handle ...
func (a *ArtifactEventHandler) Handle(ctx context.Context, value interface{}) error {
func (a *ArtifactEventHandler) Handle(ctx context.Context, value any) error {
switch v := value.(type) {
case *event.PullArtifactEvent:
return a.onPull(ctx, v.ArtifactEvent)

@@ -190,7 +190,7 @@ func (a *ArtifactEventHandler) syncFlushPullTime(ctx context.Context, artifactID

if tagName != "" {
tags, err := tag.Ctl.List(ctx, q.New(
map[string]interface{}{
map[string]any{
"ArtifactID": artifactID,
"Name": tagName,
}), nil)

@@ -53,7 +53,7 @@ func (a *ProjectEventHandler) onProjectDelete(ctx context.Context, event *event.
}

// Handle handle project event
func (a *ProjectEventHandler) Handle(ctx context.Context, value interface{}) error {
func (a *ProjectEventHandler) Handle(ctx context.Context, value any) error {
switch v := value.(type) {
case *event.DeleteProjectEvent:
return a.onProjectDelete(ctx, v)

@@ -36,7 +36,7 @@ func (p *Handler) Name() string {
}

// Handle ...
func (p *Handler) Handle(ctx context.Context, value interface{}) error {
func (p *Handler) Handle(ctx context.Context, value any) error {
switch v := value.(type) {
case *event.PushArtifactEvent:
return p.handlePushArtifact(ctx, v)

@@ -82,7 +82,7 @@ func (suite *PreheatTestSuite) TestName() {
// TestHandle ...
func (suite *PreheatTestSuite) TestHandle() {
type args struct {
data interface{}
data any
}
tests := []struct {
name string

@@ -36,7 +36,7 @@ func (r *Handler) Name() string {
}

// Handle ...
func (r *Handler) Handle(ctx context.Context, value interface{}) error {
func (r *Handler) Handle(ctx context.Context, value any) error {
pushArtEvent, ok := value.(*event.PushArtifactEvent)
if ok {
return r.handlePushArtifact(ctx, pushArtEvent)

@@ -78,7 +78,7 @@ func (r *Handler) handlePushArtifact(ctx context.Context, event *event.PushArtif
Metadata: &model.ResourceMetadata{
Repository: &model.Repository{
Name: event.Repository,
Metadata: map[string]interface{}{
Metadata: map[string]any{
"public": strconv.FormatBool(public),
},
},

@@ -138,7 +138,7 @@ func (r *Handler) handleCreateTag(ctx context.Context, event *event.CreateTagEve
Metadata: &model.ResourceMetadata{
Repository: &model.Repository{
Name: event.Repository,
Metadata: map[string]interface{}{
Metadata: map[string]any{
"public": strconv.FormatBool(public),
},
},

@@ -17,7 +17,7 @@ func TestMain(m *testing.M) {
}

func TestBuildImageResourceURL(t *testing.T) {
cfg := map[string]interface{}{
cfg := map[string]any{
common.ExtEndpoint: "https://demo.goharbor.io",
}
config.InitWithSettings(cfg)

@@ -39,7 +39,7 @@ func (a *Handler) Name() string {
}

// Handle preprocess artifact event data and then publish hook event
func (a *Handler) Handle(ctx context.Context, value interface{}) error {
func (a *Handler) Handle(ctx context.Context, value any) error {
if !config.NotificationEnable(ctx) {
log.Debug("notification feature is not enabled")
return nil

@@ -45,7 +45,7 @@ func (r *ReplicationHandler) Name() string {
}

// Handle ...
func (r *ReplicationHandler) Handle(ctx context.Context, value interface{}) error {
func (r *ReplicationHandler) Handle(ctx context.Context, value any) error {
if !config.NotificationEnable(ctx) {
log.Debug("notification feature is not enabled")
return nil

@@ -73,7 +73,7 @@ func TestReplicationHandler_Handle(t *testing.T) {
handler := &ReplicationHandler{}

type args struct {
data interface{}
data any
}
tests := []struct {
name string

@@ -40,7 +40,7 @@ func (r *RetentionHandler) Name() string {
}

// Handle ...
func (r *RetentionHandler) Handle(ctx context.Context, value interface{}) error {
func (r *RetentionHandler) Handle(ctx context.Context, value any) error {
if !config.NotificationEnable(ctx) {
log.Debug("notification feature is not enabled")
return nil

@@ -61,7 +61,7 @@ func TestRetentionHandler_Handle(t *testing.T) {
}, nil)

type args struct {
data interface{}
data any
}
tests := []struct {
name string

@@ -38,7 +38,7 @@ func (qp *Handler) Name() string {
}

// Handle ...
func (qp *Handler) Handle(ctx context.Context, value interface{}) error {
func (qp *Handler) Handle(ctx context.Context, value any) error {
quotaEvent, ok := value.(*event.QuotaEvent)
if !ok {
return errors.New("invalid quota event type")

@@ -53,7 +53,7 @@ func TestQuotaPreprocessHandler(t *testing.T) {
// SetupSuite prepares env for test suite.
func (suite *QuotaPreprocessHandlerSuite) SetupSuite() {
common_dao.PrepareTestForPostgresSQL()
cfg := map[string]interface{}{
cfg := map[string]any{
common.NotificationEnable: true,
}
config.InitWithSettings(cfg)

@@ -110,7 +110,7 @@ func (m *MockHandler) Name() string {
}

// Handle ...
func (m *MockHandler) Handle(ctx context.Context, value interface{}) error {
func (m *MockHandler) Handle(ctx context.Context, value any) error {
return nil
}

@@ -42,7 +42,7 @@ func (si *Handler) Name() string {
}

// Handle preprocess chart event data and then publish hook event
func (si *Handler) Handle(ctx context.Context, value interface{}) error {
func (si *Handler) Handle(ctx context.Context, value any) error {
if value == nil {
return errors.New("empty scan artifact event")
}

@@ -129,7 +129,7 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
// Wait for reasonable time to make sure the report is ready
// Interval=500ms and total time = 5s
// If the report is still not ready in the total time, then failed at then
for i := 0; i < 10; i++ {
for range 10 {
// First check in case it is ready
if re, err := scan.DefaultController.GetReport(ctx, art, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport}); err == nil {
if len(re) > 0 && len(re[0].Report) > 0 {

@@ -142,7 +142,7 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
time.Sleep(500 * time.Millisecond)
}

scanSummaries := map[string]interface{}{}
scanSummaries := map[string]any{}
if event.ScanType == v1.ScanTypeVulnerability {
scanSummaries, err = scan.DefaultController.GetSummary(ctx, art, event.ScanType, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport})
if err != nil {

@@ -150,7 +150,7 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
}
}

sbomOverview := map[string]interface{}{}
sbomOverview := map[string]any{}
if event.ScanType == v1.ScanTypeSbom {
sbomOverview, err = scan.DefaultController.GetSummary(ctx, art, event.ScanType, []string{v1.MimeTypeSBOMReport})
if err != nil {