mirror of https://github.com/goharbor/harbor.git
Merge branch 'main' into dependabot/npm_and_yarn/src/portal/multi-b06db9ab9b
This commit is contained in:
commit
307f78014e
|
@ -8,6 +8,9 @@
|
||||||
* Add date here... Add signature here...
|
* Add date here... Add signature here...
|
||||||
- Add your reason here...
|
- Add your reason here...
|
||||||
|
|
||||||
|
* Aug 12 2025 <yan-yw.wang@broadcom.com>
|
||||||
|
- Refresh base image
|
||||||
|
|
||||||
* Oct 24 2024 <yan-yw.wang@broadcom.com>
|
* Oct 24 2024 <yan-yw.wang@broadcom.com>
|
||||||
- Refresh base image
|
- Refresh base image
|
||||||
|
|
||||||
|
|
|
@ -46,7 +46,7 @@ jobs:
|
||||||
with:
|
with:
|
||||||
go-version: 1.23.2
|
go-version: 1.23.2
|
||||||
id: go
|
id: go
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
with:
|
with:
|
||||||
path: src/github.com/goharbor/harbor
|
path: src/github.com/goharbor/harbor
|
||||||
- name: setup env
|
- name: setup env
|
||||||
|
@ -107,7 +107,7 @@ jobs:
|
||||||
with:
|
with:
|
||||||
go-version: 1.23.2
|
go-version: 1.23.2
|
||||||
id: go
|
id: go
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
with:
|
with:
|
||||||
path: src/github.com/goharbor/harbor
|
path: src/github.com/goharbor/harbor
|
||||||
- name: setup env
|
- name: setup env
|
||||||
|
@ -148,7 +148,12 @@ jobs:
|
||||||
df -h
|
df -h
|
||||||
bash ./tests/showtime.sh ./tests/ci/api_run.sh DB $IP
|
bash ./tests/showtime.sh ./tests/ci/api_run.sh DB $IP
|
||||||
df -h
|
df -h
|
||||||
|
- name: upload_logs
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: db-api-harbor-logs.tar.gz
|
||||||
|
path: /home/runner/work/harbor/harbor/src/github.com/goharbor/harbor/integration_logs.tar.gz
|
||||||
|
retention-days: 5
|
||||||
APITEST_DB_PROXY_CACHE:
|
APITEST_DB_PROXY_CACHE:
|
||||||
env:
|
env:
|
||||||
APITEST_DB: true
|
APITEST_DB: true
|
||||||
|
@ -162,7 +167,7 @@ jobs:
|
||||||
with:
|
with:
|
||||||
go-version: 1.23.2
|
go-version: 1.23.2
|
||||||
id: go
|
id: go
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
with:
|
with:
|
||||||
path: src/github.com/goharbor/harbor
|
path: src/github.com/goharbor/harbor
|
||||||
- name: setup env
|
- name: setup env
|
||||||
|
@ -203,7 +208,12 @@ jobs:
|
||||||
df -h
|
df -h
|
||||||
bash ./tests/showtime.sh ./tests/ci/api_run.sh PROXY_CACHE $IP
|
bash ./tests/showtime.sh ./tests/ci/api_run.sh PROXY_CACHE $IP
|
||||||
df -h
|
df -h
|
||||||
|
- name: upload_logs
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: proxy-api-harbor-logs.tar.gz
|
||||||
|
path: /home/runner/work/harbor/harbor/src/github.com/goharbor/harbor/integration_logs.tar.gz
|
||||||
|
retention-days: 5
|
||||||
APITEST_LDAP:
|
APITEST_LDAP:
|
||||||
env:
|
env:
|
||||||
APITEST_LDAP: true
|
APITEST_LDAP: true
|
||||||
|
@ -217,7 +227,7 @@ jobs:
|
||||||
with:
|
with:
|
||||||
go-version: 1.23.2
|
go-version: 1.23.2
|
||||||
id: go
|
id: go
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
with:
|
with:
|
||||||
path: src/github.com/goharbor/harbor
|
path: src/github.com/goharbor/harbor
|
||||||
- name: setup env
|
- name: setup env
|
||||||
|
@ -256,7 +266,12 @@ jobs:
|
||||||
cd src/github.com/goharbor/harbor
|
cd src/github.com/goharbor/harbor
|
||||||
bash ./tests/showtime.sh ./tests/ci/api_run.sh LDAP $IP
|
bash ./tests/showtime.sh ./tests/ci/api_run.sh LDAP $IP
|
||||||
df -h
|
df -h
|
||||||
|
- name: upload_logs
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: ldap-api-harbor-logs.tar.gz
|
||||||
|
path: /home/runner/work/harbor/harbor/src/github.com/goharbor/harbor/integration_logs.tar.gz
|
||||||
|
retention-days: 5
|
||||||
OFFLINE:
|
OFFLINE:
|
||||||
env:
|
env:
|
||||||
OFFLINE: true
|
OFFLINE: true
|
||||||
|
@ -270,7 +285,7 @@ jobs:
|
||||||
with:
|
with:
|
||||||
go-version: 1.23.2
|
go-version: 1.23.2
|
||||||
id: go
|
id: go
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
with:
|
with:
|
||||||
path: src/github.com/goharbor/harbor
|
path: src/github.com/goharbor/harbor
|
||||||
- name: setup env
|
- name: setup env
|
||||||
|
@ -320,7 +335,7 @@ jobs:
|
||||||
- uses: actions/setup-node@v4
|
- uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: '18'
|
node-version: '18'
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
with:
|
with:
|
||||||
path: src/github.com/goharbor/harbor
|
path: src/github.com/goharbor/harbor
|
||||||
- name: script
|
- name: script
|
||||||
|
|
|
@ -31,13 +31,13 @@ jobs:
|
||||||
with:
|
with:
|
||||||
docker_version: 20.10
|
docker_version: 20.10
|
||||||
docker_channel: stable
|
docker_channel: stable
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- uses: jitterbit/get-changed-files@v1
|
- uses: jitterbit/get-changed-files@v1
|
||||||
id: changed-files
|
id: changed-files
|
||||||
with:
|
with:
|
||||||
format: space-delimited
|
format: space-delimited
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
with:
|
with:
|
||||||
path: src/github.com/goharbor/harbor
|
path: src/github.com/goharbor/harbor
|
||||||
- name: Build Base Image
|
- name: Build Base Image
|
||||||
|
|
|
@ -13,7 +13,7 @@ jobs:
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v5
|
||||||
with:
|
with:
|
||||||
# We must fetch at least the immediate parents so that if this is
|
# We must fetch at least the immediate parents so that if this is
|
||||||
# a pull request then we can checkout the head.
|
# a pull request then we can checkout the head.
|
||||||
|
|
|
@ -28,7 +28,7 @@ jobs:
|
||||||
with:
|
with:
|
||||||
go-version: 1.23.2
|
go-version: 1.23.2
|
||||||
id: go
|
id: go
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
with:
|
with:
|
||||||
path: src/github.com/goharbor/harbor
|
path: src/github.com/goharbor/harbor
|
||||||
- name: before_install
|
- name: before_install
|
||||||
|
|
|
@ -21,7 +21,7 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v5
|
||||||
- name: Run Trivy vulnerability scanner
|
- name: Run Trivy vulnerability scanner
|
||||||
uses: aquasecurity/trivy-action@master
|
uses: aquasecurity/trivy-action@master
|
||||||
with:
|
with:
|
||||||
|
|
|
@ -9,7 +9,7 @@ jobs:
|
||||||
release:
|
release:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v5
|
||||||
- name: Setup env
|
- name: Setup env
|
||||||
run: |
|
run: |
|
||||||
echo "CUR_TAG=${{ github.ref_name }}" >> $GITHUB_ENV
|
echo "CUR_TAG=${{ github.ref_name }}" >> $GITHUB_ENV
|
||||||
|
|
|
@ -168,7 +168,7 @@ Harbor backend is written in [Go](http://golang.org/). If you don't have a Harbo
|
||||||
| 2.11 | 1.22.3 |
|
| 2.11 | 1.22.3 |
|
||||||
| 2.12 | 1.23.2 |
|
| 2.12 | 1.23.2 |
|
||||||
| 2.13 | 1.23.8 |
|
| 2.13 | 1.23.8 |
|
||||||
| 2.14 | 1.24.5 |
|
| 2.14 | 1.24.6 |
|
||||||
|
|
||||||
|
|
||||||
Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.
|
Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.
|
||||||
|
|
2
Makefile
2
Makefile
|
@ -151,7 +151,7 @@ GOINSTALL=$(GOCMD) install
|
||||||
GOTEST=$(GOCMD) test
|
GOTEST=$(GOCMD) test
|
||||||
GODEP=$(GOTEST) -i
|
GODEP=$(GOTEST) -i
|
||||||
GOFMT=gofmt -w
|
GOFMT=gofmt -w
|
||||||
GOBUILDIMAGE=golang:1.24.5
|
GOBUILDIMAGE=golang:1.24.6
|
||||||
GOBUILDPATHINCONTAINER=/harbor
|
GOBUILDPATHINCONTAINER=/harbor
|
||||||
|
|
||||||
# go build
|
# go build
|
||||||
|
|
|
@ -7462,6 +7462,12 @@ definitions:
|
||||||
type: boolean
|
type: boolean
|
||||||
description: Whether to enable copy by chunk.
|
description: Whether to enable copy by chunk.
|
||||||
x-isnullable: true
|
x-isnullable: true
|
||||||
|
single_active_replication:
|
||||||
|
type: boolean
|
||||||
|
description: |-
|
||||||
|
Whether to skip execution until the previous active execution finishes,
|
||||||
|
avoiding the execution of the same replication rules multiple times in parallel.
|
||||||
|
x-isnullable: true # make this field optional to keep backward compatibility
|
||||||
ReplicationTrigger:
|
ReplicationTrigger:
|
||||||
type: object
|
type: object
|
||||||
properties:
|
properties:
|
||||||
|
|
|
@ -176,7 +176,7 @@ log:
|
||||||
# port: 5140
|
# port: 5140
|
||||||
|
|
||||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||||
_version: 2.13.0
|
_version: 2.14.0
|
||||||
|
|
||||||
# Uncomment external_database if using external database.
|
# Uncomment external_database if using external database.
|
||||||
# external_database:
|
# external_database:
|
||||||
|
|
|
@ -7,3 +7,5 @@ ALTER SEQUENCE permission_policy_id_seq AS BIGINT;
|
||||||
ALTER TABLE role_permission ALTER COLUMN permission_policy_id TYPE BIGINT;
|
ALTER TABLE role_permission ALTER COLUMN permission_policy_id TYPE BIGINT;
|
||||||
|
|
||||||
ALTER TABLE vulnerability_record ADD COLUMN IF NOT EXISTS status text;
|
ALTER TABLE vulnerability_record ADD COLUMN IF NOT EXISTS status text;
|
||||||
|
|
||||||
|
ALTER TABLE replication_policy ADD COLUMN IF NOT EXISTS single_active_replication boolean;
|
||||||
|
|
|
@ -10,7 +10,7 @@ from migrations import accept_versions
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
|
@click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
|
||||||
@click.option('-o', '--output', default='', help="the path of output config file")
|
@click.option('-o', '--output', default='', help="the path of output config file")
|
||||||
@click.option('-t', '--target', default='2.13.0', help="target version of input path")
|
@click.option('-t', '--target', default='2.14.0', help="target version of input path")
|
||||||
def migrate(input_, output, target):
|
def migrate(input_, output, target):
|
||||||
"""
|
"""
|
||||||
migrate command will migrate config file style to specific version
|
migrate command will migrate config file style to specific version
|
||||||
|
|
|
@ -2,4 +2,4 @@ import os
|
||||||
|
|
||||||
MIGRATION_BASE_DIR = os.path.dirname(__file__)
|
MIGRATION_BASE_DIR = os.path.dirname(__file__)
|
||||||
|
|
||||||
accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0', '2.12.0', '2.13.0'}
|
accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0', '2.12.0', '2.13.0', '2.14.0'}
|
|
@ -0,0 +1,21 @@
|
||||||
|
import os
|
||||||
|
from jinja2 import Environment, FileSystemLoader, StrictUndefined, select_autoescape
|
||||||
|
from utils.migration import read_conf
|
||||||
|
|
||||||
|
revision = '2.14.0'
|
||||||
|
down_revisions = ['2.13.0']
|
||||||
|
|
||||||
|
def migrate(input_cfg, output_cfg):
|
||||||
|
current_dir = os.path.dirname(__file__)
|
||||||
|
tpl = Environment(
|
||||||
|
loader=FileSystemLoader(current_dir),
|
||||||
|
undefined=StrictUndefined,
|
||||||
|
trim_blocks=True,
|
||||||
|
lstrip_blocks=True,
|
||||||
|
autoescape = select_autoescape()
|
||||||
|
).get_template('harbor.yml.jinja')
|
||||||
|
|
||||||
|
config_dict = read_conf(input_cfg)
|
||||||
|
|
||||||
|
with open(output_cfg, 'w') as f:
|
||||||
|
f.write(tpl.render(**config_dict))
|
|
@ -0,0 +1,775 @@
|
||||||
|
# Configuration file of Harbor
|
||||||
|
|
||||||
|
# The IP address or hostname to access admin UI and registry service.
|
||||||
|
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||||
|
hostname: {{ hostname }}
|
||||||
|
|
||||||
|
# http related config
|
||||||
|
{% if http is defined %}
|
||||||
|
http:
|
||||||
|
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||||
|
port: {{ http.port }}
|
||||||
|
{% else %}
|
||||||
|
# http:
|
||||||
|
# # port for http, default is 80. If https enabled, this port will redirect to https port
|
||||||
|
# port: 80
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if https is defined %}
|
||||||
|
# https related config
|
||||||
|
https:
|
||||||
|
# https port for harbor, default is 443
|
||||||
|
port: {{ https.port }}
|
||||||
|
# The path of cert and key files for nginx
|
||||||
|
certificate: {{ https.certificate }}
|
||||||
|
private_key: {{ https.private_key }}
|
||||||
|
# enable strong ssl ciphers (default: false)
|
||||||
|
{% if strong_ssl_ciphers is defined %}
|
||||||
|
strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
|
||||||
|
{% else %}
|
||||||
|
strong_ssl_ciphers: false
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# https related config
|
||||||
|
# https:
|
||||||
|
# # https port for harbor, default is 443
|
||||||
|
# port: 443
|
||||||
|
# # The path of cert and key files for nginx
|
||||||
|
# certificate: /your/certificate/path
|
||||||
|
# private_key: /your/private/key/path
|
||||||
|
# enable strong ssl ciphers (default: false)
|
||||||
|
# strong_ssl_ciphers: false
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# # Harbor will set ipv4 enabled only by default if this block is not configured
|
||||||
|
# # Otherwise, please uncomment this block to configure your own ip_family stacks
|
||||||
|
{% if ip_family is defined %}
|
||||||
|
ip_family:
|
||||||
|
# ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||||
|
{% if ip_family.ipv6 is defined %}
|
||||||
|
ipv6:
|
||||||
|
enabled: {{ ip_family.ipv6.enabled | lower }}
|
||||||
|
{% else %}
|
||||||
|
ipv6:
|
||||||
|
enabled: false
|
||||||
|
{% endif %}
|
||||||
|
# ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||||
|
{% if ip_family.ipv4 is defined %}
|
||||||
|
ipv4:
|
||||||
|
enabled: {{ ip_family.ipv4.enabled | lower }}
|
||||||
|
{% else %}
|
||||||
|
ipv4:
|
||||||
|
enabled: true
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# ip_family:
|
||||||
|
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||||
|
# ipv6:
|
||||||
|
# enabled: false
|
||||||
|
# # ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||||
|
# ipv4:
|
||||||
|
# enabled: true
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if internal_tls is defined %}
|
||||||
|
# Uncomment following will enable tls communication between all harbor components
|
||||||
|
internal_tls:
|
||||||
|
# set enabled to true means internal tls is enabled
|
||||||
|
enabled: {{ internal_tls.enabled | lower }}
|
||||||
|
{% if internal_tls.dir is defined %}
|
||||||
|
# put your cert and key files on dir
|
||||||
|
dir: {{ internal_tls.dir }}
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# internal_tls:
|
||||||
|
# # set enabled to true means internal tls is enabled
|
||||||
|
# enabled: true
|
||||||
|
# # put your cert and key files on dir
|
||||||
|
# dir: /etc/harbor/tls/internal
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Uncomment external_url if you want to enable external proxy
|
||||||
|
# And when it enabled the hostname will no longer used
|
||||||
|
{% if external_url is defined %}
|
||||||
|
external_url: {{ external_url }}
|
||||||
|
{% else %}
|
||||||
|
# external_url: https://reg.mydomain.com:8433
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# The initial password of Harbor admin
|
||||||
|
# It only works in first time to install harbor
|
||||||
|
# Remember Change the admin password from UI after launching Harbor.
|
||||||
|
{% if harbor_admin_password is defined %}
|
||||||
|
harbor_admin_password: {{ harbor_admin_password }}
|
||||||
|
{% else %}
|
||||||
|
harbor_admin_password: Harbor12345
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Harbor DB configuration
|
||||||
|
database:
|
||||||
|
{% if database is defined %}
|
||||||
|
# The password for the root user of Harbor DB. Change this before any production use.
|
||||||
|
password: {{ database.password}}
|
||||||
|
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||||
|
max_idle_conns: {{ database.max_idle_conns }}
|
||||||
|
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||||
|
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||||
|
max_open_conns: {{ database.max_open_conns }}
|
||||||
|
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||||
|
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
{% if database.conn_max_lifetime is defined %}
|
||||||
|
conn_max_lifetime: {{ database.conn_max_lifetime }}
|
||||||
|
{% else %}
|
||||||
|
conn_max_lifetime: 5m
|
||||||
|
{% endif %}
|
||||||
|
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||||
|
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
{% if database.conn_max_idle_time is defined %}
|
||||||
|
conn_max_idle_time: {{ database.conn_max_idle_time }}
|
||||||
|
{% else %}
|
||||||
|
conn_max_idle_time: 0
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# The password for the root user of Harbor DB. Change this before any production use.
|
||||||
|
password: root123
|
||||||
|
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||||
|
max_idle_conns: 100
|
||||||
|
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||||
|
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||||
|
max_open_conns: 900
|
||||||
|
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||||
|
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
conn_max_lifetime: 5m
|
||||||
|
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||||
|
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
conn_max_idle_time: 0
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if data_volume is defined %}
|
||||||
|
# The default data volume
|
||||||
|
data_volume: {{ data_volume }}
|
||||||
|
{% else %}
|
||||||
|
# The default data volume
|
||||||
|
data_volume: /data
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||||
|
# Uncomment storage_service setting If you want to using external storage
|
||||||
|
{% if storage_service is defined %}
|
||||||
|
storage_service:
|
||||||
|
{% for key, value in storage_service.items() %}
|
||||||
|
{% if key == 'ca_bundle' %}
|
||||||
|
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||||
|
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||||
|
ca_bundle: {{ value if value is not none else '' }}
|
||||||
|
{% elif key == 'redirect' %}
|
||||||
|
# # set disable to true when you want to disable registry redirect
|
||||||
|
redirect:
|
||||||
|
{% if storage_service.redirect.disabled is defined %}
|
||||||
|
disable: {{ storage_service.redirect.disabled | lower}}
|
||||||
|
{% else %}
|
||||||
|
disable: {{ storage_service.redirect.disable | lower}}
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||||
|
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
|
||||||
|
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||||
|
{{ key }}:
|
||||||
|
{% for k, v in value.items() %}
|
||||||
|
{{ k }}: {{ v if v is not none else '' }}
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
{% else %}
|
||||||
|
# storage_service:
|
||||||
|
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||||
|
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||||
|
# ca_bundle:
|
||||||
|
|
||||||
|
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||||
|
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
|
||||||
|
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||||
|
# filesystem:
|
||||||
|
# maxthreads: 100
|
||||||
|
# # set disable to true when you want to disable registry redirect
|
||||||
|
# redirect:
|
||||||
|
# disable: false
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Trivy configuration
|
||||||
|
#
|
||||||
|
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
|
||||||
|
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
|
||||||
|
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
|
||||||
|
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
|
||||||
|
# 12 hours and published as a new release to GitHub.
|
||||||
|
{% if trivy is defined %}
|
||||||
|
trivy:
|
||||||
|
# ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||||
|
{% if trivy.ignore_unfixed is defined %}
|
||||||
|
ignore_unfixed: {{ trivy.ignore_unfixed | lower }}
|
||||||
|
{% else %}
|
||||||
|
ignore_unfixed: false
|
||||||
|
{% endif %}
|
||||||
|
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||||
|
#
|
||||||
|
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||||
|
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||||
|
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||||
|
{% if trivy.skip_update is defined %}
|
||||||
|
skip_update: {{ trivy.skip_update | lower }}
|
||||||
|
{% else %}
|
||||||
|
skip_update: false
|
||||||
|
{% endif %}
|
||||||
|
{% if trivy.skip_java_db_update is defined %}
|
||||||
|
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||||
|
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||||
|
skip_java_db_update: {{ trivy.skip_java_db_update | lower }}
|
||||||
|
{% else %}
|
||||||
|
skip_java_db_update: false
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
{% if trivy.offline_scan is defined %}
|
||||||
|
offline_scan: {{ trivy.offline_scan | lower }}
|
||||||
|
{% else %}
|
||||||
|
offline_scan: false
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
# Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
|
||||||
|
{% if trivy.security_check is defined %}
|
||||||
|
security_check: {{ trivy.security_check }}
|
||||||
|
{% else %}
|
||||||
|
security_check: vuln
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
# insecure The flag to skip verifying registry certificate
|
||||||
|
{% if trivy.insecure is defined %}
|
||||||
|
insecure: {{ trivy.insecure | lower }}
|
||||||
|
{% else %}
|
||||||
|
insecure: false
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
{% if trivy.timeout is defined %}
|
||||||
|
# timeout The duration to wait for scan completion.
|
||||||
|
# There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
|
||||||
|
timeout: {{ trivy.timeout}}
|
||||||
|
{% else %}
|
||||||
|
timeout: 5m0s
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
# github_token The GitHub access token to download Trivy DB
|
||||||
|
#
|
||||||
|
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||||
|
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||||
|
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||||
|
# https://developer.github.com/v3/#rate-limiting
|
||||||
|
#
|
||||||
|
# You can create a GitHub token by following the instructions in
|
||||||
|
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||||
|
#
|
||||||
|
{% if trivy.github_token is defined %}
|
||||||
|
github_token: {{ trivy.github_token }}
|
||||||
|
{% else %}
|
||||||
|
# github_token: xxx
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# trivy:
|
||||||
|
# # ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||||
|
# ignore_unfixed: false
|
||||||
|
# # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||||
|
# #
|
||||||
|
# # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||||
|
# # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||||
|
# # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||||
|
# skip_update: false
|
||||||
|
# #
|
||||||
|
# # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||||
|
# # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||||
|
# skip_java_db_update: false
|
||||||
|
# #
|
||||||
|
# #The offline_scan option prevents Trivy from sending API requests to identify dependencies.
|
||||||
|
# # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
|
||||||
|
# # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
|
||||||
|
# # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
|
||||||
|
# # It would work if all the dependencies are in local.
|
||||||
|
# # This option doesn’t affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
|
||||||
|
# offline_scan: false
|
||||||
|
# #
|
||||||
|
# # insecure The flag to skip verifying registry certificate
|
||||||
|
# insecure: false
|
||||||
|
# # github_token The GitHub access token to download Trivy DB
|
||||||
|
# #
|
||||||
|
# # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||||
|
# # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||||
|
# # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||||
|
# # https://developer.github.com/v3/#rate-limiting
|
||||||
|
# #
|
||||||
|
# # timeout The duration to wait for scan completion.
|
||||||
|
# # There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
|
||||||
|
# timeout: 5m0s
|
||||||
|
# #
|
||||||
|
# # You can create a GitHub token by following the instructions in
|
||||||
|
# # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||||
|
# #
|
||||||
|
# # github_token: xxx
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
jobservice:
|
||||||
|
# Maximum number of job workers in job service
|
||||||
|
{% if jobservice is defined %}
|
||||||
|
max_job_workers: {{ jobservice.max_job_workers }}
|
||||||
|
# Maximum hours of task duration in job service, default 24
|
||||||
|
{% if jobservice.max_job_duration_hours is defined %}
|
||||||
|
max_job_duration_hours: {{ jobservice.max_job_duration_hours }}
|
||||||
|
{% else %}
|
||||||
|
max_job_duration_hours: 24
|
||||||
|
{% endif %}
|
||||||
|
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
|
||||||
|
{% if jobservice.job_loggers is defined %}
|
||||||
|
job_loggers:
|
||||||
|
{% for job_logger in jobservice.job_loggers %}
|
||||||
|
- {{job_logger}}
|
||||||
|
{% endfor %}
|
||||||
|
{% else %}
|
||||||
|
job_loggers:
|
||||||
|
- STD_OUTPUT
|
||||||
|
- FILE
|
||||||
|
# - DB
|
||||||
|
{% endif %}
|
||||||
|
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||||
|
{% if jobservice.logger_sweeper_duration is defined %}
|
||||||
|
logger_sweeper_duration: {{ jobservice.logger_sweeper_duration }}
|
||||||
|
{% else %}
|
||||||
|
logger_sweeper_duration: 1
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
max_job_workers: 10
|
||||||
|
max_job_duration_hours: 24
|
||||||
|
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
|
||||||
|
job_loggers:
|
||||||
|
- STD_OUTPUT
|
||||||
|
- FILE
|
||||||
|
# - DB
|
||||||
|
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||||
|
logger_sweeper_duration: 1
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
notification:
|
||||||
|
# Maximum retry count for webhook job
|
||||||
|
{% if notification is defined %}
|
||||||
|
webhook_job_max_retry: {{ notification.webhook_job_max_retry}}
|
||||||
|
# HTTP client timeout for webhook job
|
||||||
|
{% if notification.webhook_job_http_client_timeout is defined %}
|
||||||
|
webhook_job_http_client_timeout: {{ notification.webhook_job_http_client_timeout }}
|
||||||
|
{% else %}
|
||||||
|
webhook_job_http_client_timeout: 3 #seconds
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
webhook_job_max_retry: 3
|
||||||
|
# HTTP client timeout for webhook job
|
||||||
|
webhook_job_http_client_timeout: 3 #seconds
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Log configurations
|
||||||
|
log:
|
||||||
|
# options are debug, info, warning, error, fatal
|
||||||
|
{% if log is defined %}
|
||||||
|
level: {{ log.level }}
|
||||||
|
# configs for logs in local storage
|
||||||
|
local:
|
||||||
|
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||||
|
rotate_count: {{ log.local.rotate_count }}
|
||||||
|
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||||
|
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||||
|
# are all valid.
|
||||||
|
rotate_size: {{ log.local.rotate_size }}
|
||||||
|
# The directory on your host that store log
|
||||||
|
location: {{ log.local.location }}
|
||||||
|
{% if log.external_endpoint is defined %}
|
||||||
|
external_endpoint:
|
||||||
|
# protocol used to transmit log to external endpoint, options is tcp or udp
|
||||||
|
protocol: {{ log.external_endpoint.protocol }}
|
||||||
|
# The host of external endpoint
|
||||||
|
host: {{ log.external_endpoint.host }}
|
||||||
|
# Port of external endpoint
|
||||||
|
port: {{ log.external_endpoint.port }}
|
||||||
|
{% else %}
|
||||||
|
# Uncomment following lines to enable external syslog endpoint.
|
||||||
|
# external_endpoint:
|
||||||
|
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||||
|
# protocol: tcp
|
||||||
|
# # The host of external endpoint
|
||||||
|
# host: localhost
|
||||||
|
# # Port of external endpoint
|
||||||
|
# port: 5140
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
level: info
|
||||||
|
# configs for logs in local storage
|
||||||
|
local:
|
||||||
|
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||||
|
rotate_count: 50
|
||||||
|
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||||
|
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||||
|
# are all valid.
|
||||||
|
rotate_size: 200M
|
||||||
|
# The directory on your host that store log
|
||||||
|
location: /var/log/harbor
|
||||||
|
|
||||||
|
# Uncomment following lines to enable external syslog endpoint.
|
||||||
|
# external_endpoint:
|
||||||
|
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||||
|
# protocol: tcp
|
||||||
|
# # The host of external endpoint
|
||||||
|
# host: localhost
|
||||||
|
# # Port of external endpoint
|
||||||
|
# port: 5140
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||||
|
_version: 2.14.0
|
||||||
|
{% if external_database is defined %}
|
||||||
|
# Uncomment external_database if using external database.
|
||||||
|
external_database:
|
||||||
|
harbor:
|
||||||
|
host: {{ external_database.harbor.host }}
|
||||||
|
port: {{ external_database.harbor.port }}
|
||||||
|
db_name: {{ external_database.harbor.db_name }}
|
||||||
|
username: {{ external_database.harbor.username }}
|
||||||
|
password: {{ external_database.harbor.password }}
|
||||||
|
ssl_mode: {{ external_database.harbor.ssl_mode }}
|
||||||
|
max_idle_conns: {{ external_database.harbor.max_idle_conns}}
|
||||||
|
max_open_conns: {{ external_database.harbor.max_open_conns}}
|
||||||
|
{% else %}
|
||||||
|
# Uncomment external_database if using external database.
|
||||||
|
# external_database:
|
||||||
|
# harbor:
|
||||||
|
# host: harbor_db_host
|
||||||
|
# port: harbor_db_port
|
||||||
|
# db_name: harbor_db_name
|
||||||
|
# username: harbor_db_username
|
||||||
|
# password: harbor_db_password
|
||||||
|
# ssl_mode: disable
|
||||||
|
# max_idle_conns: 2
|
||||||
|
# max_open_conns: 0
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if redis is defined %}
|
||||||
|
redis:
|
||||||
|
# # db_index 0 is for core, it's unchangeable
|
||||||
|
{% if redis.registry_db_index is defined %}
|
||||||
|
registry_db_index: {{ redis.registry_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # registry_db_index: 1
|
||||||
|
{% endif %}
|
||||||
|
{% if redis.jobservice_db_index is defined %}
|
||||||
|
jobservice_db_index: {{ redis.jobservice_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # jobservice_db_index: 2
|
||||||
|
{% endif %}
|
||||||
|
{% if redis.trivy_db_index is defined %}
|
||||||
|
trivy_db_index: {{ redis.trivy_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # trivy_db_index: 5
|
||||||
|
{% endif %}
|
||||||
|
{% if redis.harbor_db_index is defined %}
|
||||||
|
harbor_db_index: {{ redis.harbor_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # harbor_db_index: 6
|
||||||
|
{% endif %}
|
||||||
|
{% if redis.cache_layer_db_index is defined %}
|
||||||
|
cache_layer_db_index: {{ redis.cache_layer_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # cache_layer_db_index: 7
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# Uncomment redis if need to customize redis db
|
||||||
|
# redis:
|
||||||
|
# # db_index 0 is for core, it's unchangeable
|
||||||
|
# # registry_db_index: 1
|
||||||
|
# # jobservice_db_index: 2
|
||||||
|
# # trivy_db_index: 5
|
||||||
|
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # harbor_db_index: 6
|
||||||
|
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # cache_layer_db_index: 7
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if external_redis is defined %}
|
||||||
|
external_redis:
|
||||||
|
# support redis, redis+sentinel
|
||||||
|
# host for redis: <host_redis>:<port_redis>
|
||||||
|
# host for redis+sentinel:
|
||||||
|
# <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||||
|
host: {{ external_redis.host }}
|
||||||
|
password: {{ external_redis.password }}
|
||||||
|
# Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
|
||||||
|
{% if external_redis.username is defined %}
|
||||||
|
username: {{ external_redis.username }}
|
||||||
|
{% else %}
|
||||||
|
# username:
|
||||||
|
{% endif %}
|
||||||
|
# sentinel_master_set must be set to support redis+sentinel
|
||||||
|
#sentinel_master_set:
|
||||||
|
{% if external_redis.tlsOptions is defined %}
|
||||||
|
# # tls configuration for redis connection
|
||||||
|
# # only server-authentication is supported
|
||||||
|
# # mtls for redis connection is not supported
|
||||||
|
# # tls connection will be disable by default
|
||||||
|
tlsOptions:
|
||||||
|
enable: {{ external_redis.tlsOptions.enable }}
|
||||||
|
# if it is a self-signed ca, please set the ca path specifically.
|
||||||
|
{% if external_redis.tlsOptions.rootCA is defined %}
|
||||||
|
rootCA: {{ external_redis.tlsOptions.rootCA }}
|
||||||
|
{% else %}
|
||||||
|
# rootCA:
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# # tls configuration for redis connection
|
||||||
|
# # only server-authentication is supported
|
||||||
|
# # mtls for redis connection is not supported
|
||||||
|
# # tls connection will be disable by default
|
||||||
|
# tlsOptions:
|
||||||
|
# enable: false
|
||||||
|
# # if it is a self-signed ca, please set the ca path specifically.
|
||||||
|
# rootCA:
|
||||||
|
{% endif %}
|
||||||
|
# db_index 0 is for core, it's unchangeable
|
||||||
|
registry_db_index: {{ external_redis.registry_db_index }}
|
||||||
|
jobservice_db_index: {{ external_redis.jobservice_db_index }}
|
||||||
|
trivy_db_index: 5
|
||||||
|
idle_timeout_seconds: 30
|
||||||
|
{% if external_redis.harbor_db_index is defined %}
|
||||||
|
    harbor_db_index: {{ external_redis.harbor_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # harbor_db_index: 6
|
||||||
|
{% endif %}
|
||||||
|
{% if external_redis.cache_layer_db_index is defined %}
|
||||||
|
    cache_layer_db_index: {{ external_redis.cache_layer_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # cache_layer_db_index: 7
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# Uncomment external_redis if using external Redis server
|
||||||
|
# external_redis:
|
||||||
|
# # support redis, redis+sentinel
|
||||||
|
# # host for redis: <host_redis>:<port_redis>
|
||||||
|
# # host for redis+sentinel:
|
||||||
|
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||||
|
# host: redis:6379
|
||||||
|
# password:
|
||||||
|
# # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
|
||||||
|
# # username:
|
||||||
|
# # sentinel_master_set must be set to support redis+sentinel
|
||||||
|
# #sentinel_master_set:
|
||||||
|
# # tls configuration for redis connection
|
||||||
|
# # only server-authentication is supported
|
||||||
|
# # mtls for redis connection is not supported
|
||||||
|
# # tls connection will be disable by default
|
||||||
|
# tlsOptions:
|
||||||
|
# enable: false
|
||||||
|
# # if it is a self-signed ca, please set the ca path specifically.
|
||||||
|
# rootCA:
|
||||||
|
# # db_index 0 is for core, it's unchangeable
|
||||||
|
# registry_db_index: 1
|
||||||
|
# jobservice_db_index: 2
|
||||||
|
# trivy_db_index: 5
|
||||||
|
# idle_timeout_seconds: 30
|
||||||
|
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # harbor_db_index: 6
|
||||||
|
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # cache_layer_db_index: 7
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if uaa is defined %}
|
||||||
|
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||||
|
uaa:
|
||||||
|
ca_file: {{ uaa.ca_file }}
|
||||||
|
{% else %}
|
||||||
|
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||||
|
# uaa:
|
||||||
|
# ca_file: /path/to/ca
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
# Global proxy
|
||||||
|
# Config http proxy for components, e.g. http://my.proxy.com:3128
|
||||||
|
# Components doesn't need to connect to each others via http proxy.
|
||||||
|
# Remove component from `components` array if want disable proxy
|
||||||
|
# for it. If you want use proxy for replication, MUST enable proxy
|
||||||
|
# for core and jobservice, and set `http_proxy` and `https_proxy`.
|
||||||
|
# Add domain to the `no_proxy` field, when you want disable proxy
|
||||||
|
# for some special registry.
|
||||||
|
{% if proxy is defined %}
|
||||||
|
proxy:
|
||||||
|
http_proxy: {{ proxy.http_proxy or ''}}
|
||||||
|
https_proxy: {{ proxy.https_proxy or ''}}
|
||||||
|
no_proxy: {{ proxy.no_proxy or ''}}
|
||||||
|
{% if proxy.components is defined %}
|
||||||
|
components:
|
||||||
|
{% for component in proxy.components %}
|
||||||
|
{% if component != 'clair' %}
|
||||||
|
- {{component}}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
proxy:
|
||||||
|
http_proxy:
|
||||||
|
https_proxy:
|
||||||
|
no_proxy:
|
||||||
|
components:
|
||||||
|
- core
|
||||||
|
- jobservice
|
||||||
|
- trivy
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if metric is defined %}
|
||||||
|
metric:
|
||||||
|
enabled: {{ metric.enabled }}
|
||||||
|
port: {{ metric.port }}
|
||||||
|
path: {{ metric.path }}
|
||||||
|
{% else %}
|
||||||
|
# metric:
|
||||||
|
# enabled: false
|
||||||
|
# port: 9090
|
||||||
|
# path: /metrics
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Trace related config
|
||||||
|
# only can enable one trace provider(jaeger or otel) at the same time,
|
||||||
|
# and when using jaeger as provider, can only enable it with agent mode or collector mode.
|
||||||
|
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
|
||||||
|
# if using jaeger agent mode uncomment agent_host and agent_port
|
||||||
|
{% if trace is defined %}
|
||||||
|
trace:
|
||||||
|
enabled: {{ trace.enabled | lower}}
|
||||||
|
sample_rate: {{ trace.sample_rate }}
|
||||||
|
# # namespace used to differentiate different harbor services
|
||||||
|
{% if trace.namespace is defined %}
|
||||||
|
namespace: {{ trace.namespace }}
|
||||||
|
{% else %}
|
||||||
|
# namespace:
|
||||||
|
{% endif %}
|
||||||
|
# # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||||
|
{% if trace.attributes is defined%}
|
||||||
|
attributes:
|
||||||
|
{% for name, value in trace.attributes.items() %}
|
||||||
|
{{name}}: {{value}}
|
||||||
|
{% endfor %}
|
||||||
|
{% else %}
|
||||||
|
# attributes:
|
||||||
|
# application: harbor
|
||||||
|
{% endif %}
|
||||||
|
{% if trace.jaeger is defined%}
|
||||||
|
jaeger:
|
||||||
|
endpoint: {{trace.jaeger.endpoint or '' }}
|
||||||
|
username: {{trace.jaeger.username or ''}}
|
||||||
|
password: {{trace.jaeger.password or ''}}
|
||||||
|
agent_host: {{trace.jaeger.agent_host or ''}}
|
||||||
|
agent_port: {{trace.jaeger.agent_port or ''}}
|
||||||
|
{% else %}
|
||||||
|
# jaeger:
|
||||||
|
# endpoint:
|
||||||
|
# username:
|
||||||
|
# password:
|
||||||
|
# agent_host:
|
||||||
|
# agent_port:
|
||||||
|
{% endif %}
|
||||||
|
{% if trace.otel is defined %}
|
||||||
|
otel:
|
||||||
|
endpoint: {{trace.otel.endpoint or '' }}
|
||||||
|
url_path: {{trace.otel.url_path or '' }}
|
||||||
|
compression: {{trace.otel.compression | lower }}
|
||||||
|
insecure: {{trace.otel.insecure | lower }}
|
||||||
|
timeout: {{trace.otel.timeout or '' }}
|
||||||
|
{% else %}
|
||||||
|
# otel:
|
||||||
|
# endpoint: hostname:4318
|
||||||
|
# url_path: /v1/traces
|
||||||
|
# compression: false
|
||||||
|
# insecure: true
|
||||||
|
# # timeout is in seconds
|
||||||
|
# timeout: 10
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# trace:
|
||||||
|
# enabled: true
|
||||||
|
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
|
||||||
|
# sample_rate: 1
|
||||||
|
# # # namespace used to differentiate different harbor services
|
||||||
|
# # namespace:
|
||||||
|
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||||
|
# # attributes:
|
||||||
|
# # application: harbor
|
||||||
|
# # jaeger:
|
||||||
|
# # endpoint: http://hostname:14268/api/traces
|
||||||
|
# # username:
|
||||||
|
# # password:
|
||||||
|
# # agent_host: hostname
|
||||||
|
# # agent_port: 6831
|
||||||
|
# # otel:
|
||||||
|
# # endpoint: hostname:4318
|
||||||
|
# # url_path: /v1/traces
|
||||||
|
# # compression: false
|
||||||
|
# # insecure: true
|
||||||
|
# # # timeout is in seconds
|
||||||
|
# # timeout: 10
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# enable purge _upload directories
|
||||||
|
{% if upload_purging is defined %}
|
||||||
|
upload_purging:
|
||||||
|
enabled: {{ upload_purging.enabled | lower}}
|
||||||
|
age: {{ upload_purging.age }}
|
||||||
|
interval: {{ upload_purging.interval }}
|
||||||
|
dryrun: {{ upload_purging.dryrun | lower}}
|
||||||
|
{% else %}
|
||||||
|
upload_purging:
|
||||||
|
enabled: true
|
||||||
|
# remove files in _upload directories which exist for a period of time, default is one week.
|
||||||
|
age: 168h
|
||||||
|
# the interval of the purge operations
|
||||||
|
interval: 24h
|
||||||
|
dryrun: false
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Cache layer related config
|
||||||
|
{% if cache is defined %}
|
||||||
|
cache:
|
||||||
|
enabled: {{ cache.enabled | lower}}
|
||||||
|
expire_hours: {{ cache.expire_hours }}
|
||||||
|
{% else %}
|
||||||
|
cache:
|
||||||
|
enabled: false
|
||||||
|
expire_hours: 24
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Harbor core configurations
|
||||||
|
# Uncomment to enable the following harbor core related configuration items.
|
||||||
|
{% if core is defined %}
|
||||||
|
core:
|
||||||
|
# The provider for updating project quota(usage), there are 2 options, redis or db,
|
||||||
|
# by default is implemented by db but you can switch the updation via redis which
|
||||||
|
# can improve the performance of high concurrent pushing to the same project,
|
||||||
|
# and reduce the database connections spike and occupies.
|
||||||
|
# By redis will bring up some delay for quota usage updation for display, so only
|
||||||
|
  # suggest switch provider to redis if you were ran into the db connections spike around
|
||||||
|
  # the scenario of high concurrent pushing to same project, no improvement for other scenes.
|
||||||
|
quota_update_provider: {{ core.quota_update_provider }}
|
||||||
|
{% else %}
|
||||||
|
# core:
|
||||||
|
# # The provider for updating project quota(usage), there are 2 options, redis or db,
|
||||||
|
# # by default is implemented by db but you can switch the updation via redis which
|
||||||
|
# # can improve the performance of high concurrent pushing to the same project,
|
||||||
|
# # and reduce the database connections spike and occupies.
|
||||||
|
# # By redis will bring up some delay for quota usage updation for display, so only
|
||||||
|
# # suggest switch provider to redis if you were ran into the db connections spike around
|
||||||
|
# # the scenario of high concurrent pushing to same project, no improvement for other scenes.
|
||||||
|
# quota_update_provider: redis # Or db
|
||||||
|
{% endif %}
|
|
@ -40,7 +40,7 @@ REGISTRY_CREDENTIAL_USERNAME={{registry_username}}
|
||||||
REGISTRY_CREDENTIAL_PASSWORD={{registry_password}}
|
REGISTRY_CREDENTIAL_PASSWORD={{registry_password}}
|
||||||
CSRF_KEY={{csrf_key}}
|
CSRF_KEY={{csrf_key}}
|
||||||
ROBOT_SCANNER_NAME_PREFIX={{scan_robot_prefix}}
|
ROBOT_SCANNER_NAME_PREFIX={{scan_robot_prefix}}
|
||||||
PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory
|
PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,docker-registry,github-ghcr,jfrog-artifactory
|
||||||
REPLICATION_ADAPTER_WHITELIST=ali-acr,aws-ecr,azure-acr,docker-hub,docker-registry,github-ghcr,google-gcr,harbor,huawei-SWR,jfrog-artifactory,tencent-tcr,volcengine-cr
|
REPLICATION_ADAPTER_WHITELIST=ali-acr,aws-ecr,azure-acr,docker-hub,docker-registry,github-ghcr,google-gcr,harbor,huawei-SWR,jfrog-artifactory,tencent-tcr,volcengine-cr
|
||||||
|
|
||||||
HTTP_PROXY={{core_http_proxy}}
|
HTTP_PROXY={{core_http_proxy}}
|
||||||
|
|
|
@ -72,7 +72,7 @@ func (s *SecurityContext) IsSolutionUser() bool {
|
||||||
|
|
||||||
// Can returns true only when requesting pull/push operation against the specific project
|
// Can returns true only when requesting pull/push operation against the specific project
|
||||||
func (s *SecurityContext) Can(ctx context.Context, action types.Action, resource types.Resource) bool {
|
func (s *SecurityContext) Can(ctx context.Context, action types.Action, resource types.Resource) bool {
|
||||||
if !(action == rbac.ActionPull || action == rbac.ActionPush) {
|
if !(action == rbac.ActionPull || action == rbac.ActionPush || action == rbac.ActionDelete) {
|
||||||
log.Debugf("unauthorized for action %s", action)
|
log.Debugf("unauthorized for action %s", action)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
|
@ -63,8 +63,8 @@ func (p *proxyCacheSecretTestSuite) TestIsSolutionUser() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *proxyCacheSecretTestSuite) TestCan() {
|
func (p *proxyCacheSecretTestSuite) TestCan() {
|
||||||
// the action isn't pull/push
|
// the action isn't pull/push/delete
|
||||||
action := rbac.ActionDelete
|
action := rbac.ActionUpdate
|
||||||
resource := project.NewNamespace(1).Resource(rbac.ResourceRepository)
|
resource := project.NewNamespace(1).Resource(rbac.ResourceRepository)
|
||||||
p.False(p.sc.Can(context.TODO(), action, resource))
|
p.False(p.sc.Can(context.TODO(), action, resource))
|
||||||
|
|
||||||
|
|
|
@ -149,6 +149,22 @@ type ManifestList struct {
|
||||||
ContentType string
|
ContentType string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getManifestDigestInLocal get the artifact digest in local
|
||||||
|
func (c *controller) getManifestDigestInLocal(ctx context.Context, art lib.ArtifactInfo) (string, error) {
|
||||||
|
// Get the manifest from local registry
|
||||||
|
a, err := c.local.GetManifest(ctx, art)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if a == nil {
|
||||||
|
return "", errors.NotFoundError(fmt.Errorf("manifest %v not found in local registry", art.Repository))
|
||||||
|
}
|
||||||
|
if len(a.Digest) == 0 {
|
||||||
|
return "", errors.NotFoundError(fmt.Errorf("manifest %v not found in local registry", art.Repository))
|
||||||
|
}
|
||||||
|
return a.Digest, nil
|
||||||
|
}
|
||||||
|
|
||||||
// UseLocalManifest check if these manifest could be found in local registry,
|
// UseLocalManifest check if these manifest could be found in local registry,
|
||||||
// the return error should be nil when it is not found in local and need to delegate to remote registry
|
// the return error should be nil when it is not found in local and need to delegate to remote registry
|
||||||
// the return error should be NotFoundError when it is not found in remote registry
|
// the return error should be NotFoundError when it is not found in remote registry
|
||||||
|
@ -172,6 +188,16 @@ func (c *controller) UseLocalManifest(ctx context.Context, art lib.ArtifactInfo,
|
||||||
return false, nil, err
|
return false, nil, err
|
||||||
}
|
}
|
||||||
if !exist || desc == nil {
|
if !exist || desc == nil {
|
||||||
|
dig, err := c.getManifestDigestInLocal(ctx, art)
|
||||||
|
if err != nil {
|
||||||
|
// skip to delete when error, use debug level log to avoid too many logs when the manifest is removed from upstream
|
||||||
|
log.Debugf("failed to get manifest digest in local, error: %v, skip to delete it, art %+v", err, art)
|
||||||
|
} else {
|
||||||
|
go func() {
|
||||||
|
c.local.DeleteManifest(art.Repository, dig)
|
||||||
|
log.Infof("delete manifest %s with digest %s", art.Repository, dig)
|
||||||
|
}()
|
||||||
|
}
|
||||||
return false, nil, errors.NotFoundError(fmt.Errorf("repo %v, tag %v not found", art.Repository, art.Tag))
|
return false, nil, errors.NotFoundError(fmt.Errorf("repo %v, tag %v not found", art.Repository, art.Tag))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -109,10 +109,37 @@ func (c *controller) Start(ctx context.Context, policy *replicationmodel.Policy,
|
||||||
if op := operator.FromContext(ctx); op != "" {
|
if op := operator.FromContext(ctx); op != "" {
|
||||||
extra["operator"] = op
|
extra["operator"] = op
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var count int64
|
||||||
|
// If running executions are found, skip the current execution and mark it as error.
|
||||||
|
if policy.SingleActiveReplication {
|
||||||
|
var err error
|
||||||
|
count, err = c.execMgr.Count(ctx, &q.Query{
|
||||||
|
Keywords: map[string]any{
|
||||||
|
"VendorType": job.ReplicationVendorType,
|
||||||
|
"VendorID": policy.ID,
|
||||||
|
"Status": job.RunningStatus.String(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to count running executions for policy ID: %d: %v", policy.ID, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
id, err := c.execMgr.Create(ctx, job.ReplicationVendorType, policy.ID, trigger, extra)
|
id, err := c.execMgr.Create(ctx, job.ReplicationVendorType, policy.ID, trigger, extra)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if policy.SingleActiveReplication {
|
||||||
|
if count > 0 {
|
||||||
|
if err = c.execMgr.MarkError(ctx, id, "Execution skipped: active replication still in progress."); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// start the replication flow in background
|
// start the replication flow in background
|
||||||
// as the process runs inside a goroutine, the transaction in the outer ctx
|
// as the process runs inside a goroutine, the transaction in the outer ctx
|
||||||
// may be submitted already when the process starts, so create an new context
|
// may be submitted already when the process starts, so create an new context
|
||||||
|
|
|
@ -101,6 +101,38 @@ func (r *replicationTestSuite) TestStart() {
|
||||||
r.execMgr.AssertExpectations(r.T())
|
r.execMgr.AssertExpectations(r.T())
|
||||||
r.flowCtl.AssertExpectations(r.T())
|
r.flowCtl.AssertExpectations(r.T())
|
||||||
r.ormCreator.AssertExpectations(r.T())
|
r.ormCreator.AssertExpectations(r.T())
|
||||||
|
|
||||||
|
r.SetupTest()
|
||||||
|
|
||||||
|
// run replication flow with SingleActiveReplication, flow should not start
|
||||||
|
r.execMgr.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
|
||||||
|
r.execMgr.On("MarkError", mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||||
|
r.execMgr.On("Count", mock.Anything, mock.Anything).Return(int64(1), nil) // Simulate an existing running execution
|
||||||
|
id, err = r.ctl.Start(context.Background(), &repctlmodel.Policy{Enabled: true, SingleActiveReplication: true}, nil, task.ExecutionTriggerManual)
|
||||||
|
r.Require().Nil(err)
|
||||||
|
r.Equal(int64(1), id)
|
||||||
|
time.Sleep(1 * time.Second) // wait the functions called in the goroutine
|
||||||
|
r.flowCtl.AssertNumberOfCalls(r.T(), "Start", 0)
|
||||||
|
r.execMgr.AssertNumberOfCalls(r.T(), "MarkError", 1) // Ensure execution marked as final status error
|
||||||
|
r.execMgr.AssertExpectations(r.T())
|
||||||
|
r.flowCtl.AssertExpectations(r.T())
|
||||||
|
r.ormCreator.AssertExpectations(r.T())
|
||||||
|
|
||||||
|
r.SetupTest()
|
||||||
|
|
||||||
|
// no error when running the replication flow with SingleActiveReplication
|
||||||
|
r.execMgr.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
|
||||||
|
r.execMgr.On("Get", mock.Anything, mock.Anything).Return(&task.Execution{}, nil)
|
||||||
|
r.flowCtl.On("Start", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||||
|
r.ormCreator.On("Create").Return(nil)
|
||||||
|
r.execMgr.On("Count", mock.Anything, mock.Anything).Return(int64(0), nil) // Simulate no running execution
|
||||||
|
id, err = r.ctl.Start(context.Background(), &repctlmodel.Policy{Enabled: true, SingleActiveReplication: true}, nil, task.ExecutionTriggerManual)
|
||||||
|
r.Require().Nil(err)
|
||||||
|
r.Equal(int64(1), id)
|
||||||
|
time.Sleep(1 * time.Second) // wait the functions called in the goroutine
|
||||||
|
r.execMgr.AssertExpectations(r.T())
|
||||||
|
r.flowCtl.AssertExpectations(r.T())
|
||||||
|
r.ormCreator.AssertExpectations(r.T())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *replicationTestSuite) TestStop() {
|
func (r *replicationTestSuite) TestStop() {
|
||||||
|
|
|
@ -47,6 +47,7 @@ type Policy struct {
|
||||||
UpdateTime time.Time `json:"update_time"`
|
UpdateTime time.Time `json:"update_time"`
|
||||||
Speed int32 `json:"speed"`
|
Speed int32 `json:"speed"`
|
||||||
CopyByChunk bool `json:"copy_by_chunk"`
|
CopyByChunk bool `json:"copy_by_chunk"`
|
||||||
|
SingleActiveReplication bool `json:"single_active_replication"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsScheduledTrigger returns true when the policy is scheduled trigger and enabled
|
// IsScheduledTrigger returns true when the policy is scheduled trigger and enabled
|
||||||
|
@ -141,6 +142,7 @@ func (p *Policy) From(policy *replicationmodel.Policy) error {
|
||||||
p.UpdateTime = policy.UpdateTime
|
p.UpdateTime = policy.UpdateTime
|
||||||
p.Speed = policy.Speed
|
p.Speed = policy.Speed
|
||||||
p.CopyByChunk = policy.CopyByChunk
|
p.CopyByChunk = policy.CopyByChunk
|
||||||
|
p.SingleActiveReplication = policy.SingleActiveReplication
|
||||||
|
|
||||||
if policy.SrcRegistryID > 0 {
|
if policy.SrcRegistryID > 0 {
|
||||||
p.SrcRegistry = &model.Registry{
|
p.SrcRegistry = &model.Registry{
|
||||||
|
@ -186,6 +188,7 @@ func (p *Policy) To() (*replicationmodel.Policy, error) {
|
||||||
UpdateTime: p.UpdateTime,
|
UpdateTime: p.UpdateTime,
|
||||||
Speed: p.Speed,
|
Speed: p.Speed,
|
||||||
CopyByChunk: p.CopyByChunk,
|
CopyByChunk: p.CopyByChunk,
|
||||||
|
SingleActiveReplication: p.SingleActiveReplication,
|
||||||
}
|
}
|
||||||
if p.SrcRegistry != nil {
|
if p.SrcRegistry != nil {
|
||||||
policy.SrcRegistryID = p.SrcRegistry.ID
|
policy.SrcRegistryID = p.SrcRegistry.ID
|
||||||
|
|
|
@ -597,7 +597,9 @@ func (bc *basicController) GetReport(ctx context.Context, artifact *ar.Artifact,
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if !scannable {
|
// When the scanner is unhealthy, the artifact will be recognized as "not scannable", in this case we will not return the error
|
||||||
|
// but return the scanner report with the best effort.
|
||||||
|
if !scannable && r.Health == sc.StatusHealthy {
|
||||||
return nil, errors.NotFoundError(nil).WithMessagef("report not found for %s@%s", artifact.RepositoryName, artifact.Digest)
|
return nil, errors.NotFoundError(nil).WithMessagef("report not found for %s@%s", artifact.RepositoryName, artifact.Digest)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -36,8 +36,8 @@ import (
|
||||||
|
|
||||||
const (
|
const (
|
||||||
proScannerMetaKey = "projectScanner"
|
proScannerMetaKey = "projectScanner"
|
||||||
statusUnhealthy = "unhealthy"
|
StatusUnhealthy = "unhealthy"
|
||||||
statusHealthy = "healthy"
|
StatusHealthy = "healthy"
|
||||||
// RetrieveCapFailMsg the message indicate failed to retrieve the scanner capabilities
|
// RetrieveCapFailMsg the message indicate failed to retrieve the scanner capabilities
|
||||||
RetrieveCapFailMsg = "failed to retrieve scanner capabilities, error %v"
|
RetrieveCapFailMsg = "failed to retrieve scanner capabilities, error %v"
|
||||||
)
|
)
|
||||||
|
@ -287,9 +287,9 @@ func (bc *basicController) GetRegistrationByProject(ctx context.Context, project
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Not blocked, just logged it
|
// Not blocked, just logged it
|
||||||
log.Error(errors.Wrap(err, "api controller: get project scanner"))
|
log.Error(errors.Wrap(err, "api controller: get project scanner"))
|
||||||
registration.Health = statusUnhealthy
|
registration.Health = StatusUnhealthy
|
||||||
} else {
|
} else {
|
||||||
registration.Health = statusHealthy
|
registration.Health = StatusHealthy
|
||||||
// Fill in some metadata
|
// Fill in some metadata
|
||||||
registration.Adapter = meta.Scanner.Name
|
registration.Adapter = meta.Scanner.Name
|
||||||
registration.Vendor = meta.Scanner.Vendor
|
registration.Vendor = meta.Scanner.Vendor
|
||||||
|
|
35
src/go.mod
35
src/go.mod
|
@ -1,6 +1,6 @@
|
||||||
module github.com/goharbor/harbor/src
|
module github.com/goharbor/harbor/src
|
||||||
|
|
||||||
go 1.24.5
|
go 1.24.6
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/CloudNativeAI/model-spec v0.0.5
|
github.com/CloudNativeAI/model-spec v0.0.5
|
||||||
|
@ -22,12 +22,12 @@ require (
|
||||||
github.com/go-asn1-ber/asn1-ber v1.5.7
|
github.com/go-asn1-ber/asn1-ber v1.5.7
|
||||||
github.com/go-ldap/ldap/v3 v3.4.10
|
github.com/go-ldap/ldap/v3 v3.4.10
|
||||||
github.com/go-openapi/errors v0.22.0
|
github.com/go-openapi/errors v0.22.0
|
||||||
github.com/go-openapi/loads v0.22.0
|
github.com/go-openapi/loads v0.22.0 // indirect
|
||||||
github.com/go-openapi/runtime v0.28.0
|
github.com/go-openapi/runtime v0.28.0
|
||||||
github.com/go-openapi/spec v0.21.0
|
github.com/go-openapi/spec v0.21.0 // indirect
|
||||||
github.com/go-openapi/strfmt v0.23.0
|
github.com/go-openapi/strfmt v0.23.0
|
||||||
github.com/go-openapi/swag v0.23.0
|
github.com/go-openapi/swag v0.23.0
|
||||||
github.com/go-openapi/validate v0.24.0
|
github.com/go-openapi/validate v0.24.0 // indirect
|
||||||
github.com/go-redis/redis/v8 v8.11.4
|
github.com/go-redis/redis/v8 v8.11.4
|
||||||
github.com/gocarina/gocsv v0.0.0-20210516172204-ca9e8a8ddea8
|
github.com/gocarina/gocsv v0.0.0-20210516172204-ca9e8a8ddea8
|
||||||
github.com/gocraft/work v0.5.1
|
github.com/gocraft/work v0.5.1
|
||||||
|
@ -56,7 +56,7 @@ require (
|
||||||
github.com/stretchr/testify v1.10.0
|
github.com/stretchr/testify v1.10.0
|
||||||
github.com/tencentcloud/tencentcloud-sdk-go v3.0.233+incompatible
|
github.com/tencentcloud/tencentcloud-sdk-go v3.0.233+incompatible
|
||||||
github.com/vmihailenco/msgpack/v5 v5.4.1
|
github.com/vmihailenco/msgpack/v5 v5.4.1
|
||||||
github.com/volcengine/volcengine-go-sdk v1.1.19
|
github.com/volcengine/volcengine-go-sdk v1.1.26
|
||||||
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.59.0
|
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.59.0
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
|
||||||
go.opentelemetry.io/otel v1.35.0
|
go.opentelemetry.io/otel v1.35.0
|
||||||
|
@ -66,19 +66,19 @@ require (
|
||||||
go.opentelemetry.io/otel/trace v1.35.0
|
go.opentelemetry.io/otel/trace v1.35.0
|
||||||
go.pinniped.dev v0.37.0
|
go.pinniped.dev v0.37.0
|
||||||
go.uber.org/ratelimit v0.3.1
|
go.uber.org/ratelimit v0.3.1
|
||||||
golang.org/x/crypto v0.39.0
|
golang.org/x/crypto v0.40.0
|
||||||
golang.org/x/net v0.40.0
|
golang.org/x/net v0.41.0
|
||||||
golang.org/x/oauth2 v0.28.0
|
golang.org/x/oauth2 v0.28.0
|
||||||
golang.org/x/sync v0.15.0
|
golang.org/x/sync v0.16.0
|
||||||
golang.org/x/text v0.26.0
|
golang.org/x/text v0.27.0
|
||||||
golang.org/x/time v0.11.0
|
golang.org/x/time v0.11.0
|
||||||
gopkg.in/h2non/gock.v1 v1.1.2
|
gopkg.in/h2non/gock.v1 v1.1.2
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
helm.sh/helm/v3 v3.18.4
|
helm.sh/helm/v3 v3.18.5
|
||||||
k8s.io/api v0.33.2
|
k8s.io/api v0.33.3
|
||||||
k8s.io/apimachinery v0.33.2
|
k8s.io/apimachinery v0.33.3
|
||||||
k8s.io/client-go v0.33.2
|
k8s.io/client-go v0.33.3
|
||||||
sigs.k8s.io/yaml v1.4.0
|
sigs.k8s.io/yaml v1.5.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
@ -161,7 +161,7 @@ require (
|
||||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||||
github.com/spf13/afero v1.11.0 // indirect
|
github.com/spf13/afero v1.11.0 // indirect
|
||||||
github.com/spf13/cast v1.7.0 // indirect
|
github.com/spf13/cast v1.7.0 // indirect
|
||||||
github.com/spf13/pflag v1.0.6 // indirect
|
github.com/spf13/pflag v1.0.7 // indirect
|
||||||
github.com/stretchr/objx v0.5.2 // indirect
|
github.com/stretchr/objx v0.5.2 // indirect
|
||||||
github.com/subosito/gotenv v1.6.0 // indirect
|
github.com/subosito/gotenv v1.6.0 // indirect
|
||||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||||
|
@ -177,9 +177,10 @@ require (
|
||||||
go.uber.org/atomic v1.9.0 // indirect
|
go.uber.org/atomic v1.9.0 // indirect
|
||||||
go.uber.org/multierr v1.11.0 // indirect
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
go.uber.org/zap v1.27.0 // indirect
|
go.uber.org/zap v1.27.0 // indirect
|
||||||
|
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||||
golang.org/x/sys v0.33.0 // indirect
|
golang.org/x/sys v0.34.0 // indirect
|
||||||
golang.org/x/term v0.32.0 // indirect
|
golang.org/x/term v0.33.0 // indirect
|
||||||
google.golang.org/api v0.171.0 // indirect
|
google.golang.org/api v0.171.0 // indirect
|
||||||
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 // indirect
|
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 // indirect
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
|
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
|
||||||
|
|
59
src/go.sum
59
src/go.sum
|
@ -522,8 +522,8 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||||
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
|
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
|
||||||
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
|
||||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
|
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
|
||||||
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
|
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
@ -564,8 +564,8 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh
|
||||||
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||||
github.com/volcengine/volc-sdk-golang v1.0.23 h1:anOslb2Qp6ywnsbyq9jqR0ljuO63kg9PY+4OehIk5R8=
|
github.com/volcengine/volc-sdk-golang v1.0.23 h1:anOslb2Qp6ywnsbyq9jqR0ljuO63kg9PY+4OehIk5R8=
|
||||||
github.com/volcengine/volc-sdk-golang v1.0.23/go.mod h1:AfG/PZRUkHJ9inETvbjNifTDgut25Wbkm2QoYBTbvyU=
|
github.com/volcengine/volc-sdk-golang v1.0.23/go.mod h1:AfG/PZRUkHJ9inETvbjNifTDgut25Wbkm2QoYBTbvyU=
|
||||||
github.com/volcengine/volcengine-go-sdk v1.1.19 h1:+jLVMqDtdtiAt8QGBk6AMiEg22Br5SZGu2FSHUrIcU0=
|
github.com/volcengine/volcengine-go-sdk v1.1.26 h1:yMFYnfBbrHO5h/kONOMlgZqviN+XSNq1t3kGwNgipGE=
|
||||||
github.com/volcengine/volcengine-go-sdk v1.1.19/go.mod h1:EyKoi6t6eZxoPNGr2GdFCZti2Skd7MO3eUzx7TtSvNo=
|
github.com/volcengine/volcengine-go-sdk v1.1.26/go.mod h1:EyKoi6t6eZxoPNGr2GdFCZti2Skd7MO3eUzx7TtSvNo=
|
||||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
@ -621,6 +621,10 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||||
|
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||||
|
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||||
|
go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
|
||||||
|
go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||||
|
@ -638,8 +642,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
|
||||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
@ -693,8 +697,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||||
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
|
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||||
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
|
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
|
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
|
||||||
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||||
|
@ -710,8 +714,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
@ -746,8 +750,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
@ -758,8 +762,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
@ -772,8 +776,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
@ -798,8 +802,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
@ -874,17 +878,17 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||||
helm.sh/helm/v3 v3.18.4 h1:pNhnHM3nAmDrxz6/UC+hfjDY4yeDATQCka2/87hkZXQ=
|
helm.sh/helm/v3 v3.18.5 h1:Cc3Z5vd6kDrZq9wO9KxKLNEickiTho6/H/dBNRVSos4=
|
||||||
helm.sh/helm/v3 v3.18.4/go.mod h1:WVnwKARAw01iEdjpEkP7Ii1tT1pTPYfM1HsakFKM3LI=
|
helm.sh/helm/v3 v3.18.5/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY=
|
k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8=
|
||||||
k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs=
|
k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE=
|
||||||
k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY=
|
k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA=
|
||||||
k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||||
k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E=
|
k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA=
|
||||||
k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo=
|
k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg=
|
||||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||||
|
@ -899,5 +903,6 @@ sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
|
||||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||||
|
sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ=
|
||||||
|
sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4=
|
||||||
|
|
|
@ -43,6 +43,7 @@ type Policy struct {
|
||||||
UpdateTime time.Time `orm:"column(update_time);auto_now"`
|
UpdateTime time.Time `orm:"column(update_time);auto_now"`
|
||||||
Speed int32 `orm:"column(speed_kb)"`
|
Speed int32 `orm:"column(speed_kb)"`
|
||||||
CopyByChunk bool `orm:"column(copy_by_chunk)"`
|
CopyByChunk bool `orm:"column(copy_by_chunk)"`
|
||||||
|
SingleActiveReplication bool `orm:"column(single_active_replication)"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// TableName set table name for ORM
|
// TableName set table name for ORM
|
||||||
|
|
|
@ -825,6 +825,39 @@
|
||||||
'REPLICATION.ENABLED_RULE' | translate
|
'REPLICATION.ENABLED_RULE' | translate
|
||||||
}}</label>
|
}}</label>
|
||||||
</div>
|
</div>
|
||||||
|
<div
|
||||||
|
class="clr-checkbox-wrapper"
|
||||||
|
[hidden]="!isNotEventBased()">
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
class="clr-checkbox"
|
||||||
|
[checked]="true"
|
||||||
|
id="singleActiveReplication"
|
||||||
|
formControlName="single_active_replication" />
|
||||||
|
<label
|
||||||
|
for="singleActiveReplication"
|
||||||
|
class="clr-control-label single-active"
|
||||||
|
>{{
|
||||||
|
'REPLICATION.SINGLE_ACTIVE_REPLICATION'
|
||||||
|
| translate
|
||||||
|
}}
|
||||||
|
<clr-tooltip class="override-tooltip">
|
||||||
|
<clr-icon
|
||||||
|
clrTooltipTrigger
|
||||||
|
shape="info-circle"
|
||||||
|
size="24"></clr-icon>
|
||||||
|
<clr-tooltip-content
|
||||||
|
clrPosition="top-left"
|
||||||
|
clrSize="md"
|
||||||
|
*clrIfOpen>
|
||||||
|
<span>{{
|
||||||
|
'TOOLTIP.SINGLE_ACTIVE_REPLICATION'
|
||||||
|
| translate
|
||||||
|
}}</span>
|
||||||
|
</clr-tooltip-content>
|
||||||
|
</clr-tooltip>
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</form>
|
</form>
|
||||||
|
|
|
@ -246,6 +246,10 @@ clr-modal {
|
||||||
width: 8.6rem;
|
width: 8.6rem;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.single-active {
|
||||||
|
width: 16rem;
|
||||||
|
}
|
||||||
|
|
||||||
.des-tooltip {
|
.des-tooltip {
|
||||||
margin-left: 0.5rem;
|
margin-left: 0.5rem;
|
||||||
}
|
}
|
||||||
|
|
|
@ -334,6 +334,7 @@ export class CreateEditRuleComponent implements OnInit, OnDestroy {
|
||||||
override: true,
|
override: true,
|
||||||
speed: -1,
|
speed: -1,
|
||||||
copy_by_chunk: false,
|
copy_by_chunk: false,
|
||||||
|
single_active_replication: false,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -367,6 +368,7 @@ export class CreateEditRuleComponent implements OnInit, OnDestroy {
|
||||||
dest_namespace_replace_count: Flatten_Level.FLATTEN_LEVEl_1,
|
dest_namespace_replace_count: Flatten_Level.FLATTEN_LEVEl_1,
|
||||||
speed: -1,
|
speed: -1,
|
||||||
copy_by_chunk: false,
|
copy_by_chunk: false,
|
||||||
|
single_active_replication: false,
|
||||||
});
|
});
|
||||||
this.isPushMode = true;
|
this.isPushMode = true;
|
||||||
this.selectedUnit = BandwidthUnit.KB;
|
this.selectedUnit = BandwidthUnit.KB;
|
||||||
|
@ -410,6 +412,7 @@ export class CreateEditRuleComponent implements OnInit, OnDestroy {
|
||||||
override: rule.override,
|
override: rule.override,
|
||||||
speed: speed,
|
speed: speed,
|
||||||
copy_by_chunk: rule.copy_by_chunk,
|
copy_by_chunk: rule.copy_by_chunk,
|
||||||
|
single_active_replication: rule.single_active_replication,
|
||||||
});
|
});
|
||||||
let filtersArray = this.getFilterArray(rule);
|
let filtersArray = this.getFilterArray(rule);
|
||||||
this.noSelectedEndpoint = false;
|
this.noSelectedEndpoint = false;
|
||||||
|
@ -510,6 +513,9 @@ export class CreateEditRuleComponent implements OnInit, OnDestroy {
|
||||||
}
|
}
|
||||||
|
|
||||||
onSubmit() {
|
onSubmit() {
|
||||||
|
if (this.ruleForm.value.trigger.type === 'event_based') {
|
||||||
|
this.ruleForm.get('single_active_replication').setValue(false);
|
||||||
|
}
|
||||||
if (this.ruleForm.value.trigger.type !== 'scheduled') {
|
if (this.ruleForm.value.trigger.type !== 'scheduled') {
|
||||||
this.ruleForm
|
this.ruleForm
|
||||||
.get('trigger')
|
.get('trigger')
|
||||||
|
|
|
@ -138,7 +138,7 @@
|
||||||
}}</a>
|
}}</a>
|
||||||
</clr-dg-cell>
|
</clr-dg-cell>
|
||||||
<clr-dg-cell>
|
<clr-dg-cell>
|
||||||
{{ getStatusStr(j.status) }}
|
{{ getStatusStr(j.status, j.status_text) }}
|
||||||
<clr-tooltip>
|
<clr-tooltip>
|
||||||
<clr-icon
|
<clr-icon
|
||||||
*ngIf="j.status_text"
|
*ngIf="j.status_text"
|
||||||
|
|
|
@ -644,7 +644,11 @@ export class ReplicationComponent implements OnInit, OnDestroy {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
getStatusStr(status: string): string {
|
getStatusStr(status: string, status_text: string): string {
|
||||||
|
// If status is Failed and status_text has 'Execution skipped', it means the replication task is skipped.
|
||||||
|
if (status === 'Failed' && status_text.startsWith('Execution skipped'))
|
||||||
|
return 'Skipped';
|
||||||
|
|
||||||
if (STATUS_MAP && STATUS_MAP[status]) {
|
if (STATUS_MAP && STATUS_MAP[status]) {
|
||||||
return STATUS_MAP[status];
|
return STATUS_MAP[status];
|
||||||
}
|
}
|
||||||
|
|
|
@ -76,6 +76,7 @@
|
||||||
"PULL_BASED": "Lade die Ressourcen von der entfernten Registry auf den lokalen Harbor runter.",
|
"PULL_BASED": "Lade die Ressourcen von der entfernten Registry auf den lokalen Harbor runter.",
|
||||||
"DESTINATION_NAMESPACE": "Spezifizieren des Ziel-Namespace. Wenn das Feld leer ist, werden die Ressourcen unter dem gleichen Namespace abgelegt wie in der Quelle.",
|
"DESTINATION_NAMESPACE": "Spezifizieren des Ziel-Namespace. Wenn das Feld leer ist, werden die Ressourcen unter dem gleichen Namespace abgelegt wie in der Quelle.",
|
||||||
"OVERRIDE": "Spezifizieren, ob die Ressourcen am Ziel überschrieben werden sollen, falls eine Ressource mit gleichem Namen existiert.",
|
"OVERRIDE": "Spezifizieren, ob die Ressourcen am Ziel überschrieben werden sollen, falls eine Ressource mit gleichem Namen existiert.",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Specify whether to skip execution until the previous active execution finishes, avoiding the execution of the same replication rules multiple times in parallel.",
|
||||||
"EMAIL": "E-Mail sollte eine gültige E-Mail-Adresse wie name@example.com sein.",
|
"EMAIL": "E-Mail sollte eine gültige E-Mail-Adresse wie name@example.com sein.",
|
||||||
"USER_NAME": "Darf keine Sonderzeichen enthalten und sollte kürzer als 255 Zeichen sein.",
|
"USER_NAME": "Darf keine Sonderzeichen enthalten und sollte kürzer als 255 Zeichen sein.",
|
||||||
"FULL_NAME": "Maximale Länge soll 20 Zeichen sein.",
|
"FULL_NAME": "Maximale Länge soll 20 Zeichen sein.",
|
||||||
|
@ -578,6 +579,7 @@
|
||||||
"ALLOWED_CHARACTERS": "Erlaubte Sonderzeichen",
|
"ALLOWED_CHARACTERS": "Erlaubte Sonderzeichen",
|
||||||
"TOTAL": "Gesamt",
|
"TOTAL": "Gesamt",
|
||||||
"OVERRIDE": "Überschreiben",
|
"OVERRIDE": "Überschreiben",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Single active replication",
|
||||||
"ENABLED_RULE": "Aktiviere Regel",
|
"ENABLED_RULE": "Aktiviere Regel",
|
||||||
"OVERRIDE_INFO": "Überschreiben",
|
"OVERRIDE_INFO": "Überschreiben",
|
||||||
"OPERATION": "Operation",
|
"OPERATION": "Operation",
|
||||||
|
|
|
@ -76,6 +76,7 @@
|
||||||
"PULL_BASED": "Pull the resources from the remote registry to the local Harbor.",
|
"PULL_BASED": "Pull the resources from the remote registry to the local Harbor.",
|
||||||
"DESTINATION_NAMESPACE": "Specify the destination namespace. If empty, the resources will be put under the same namespace as the source.",
|
"DESTINATION_NAMESPACE": "Specify the destination namespace. If empty, the resources will be put under the same namespace as the source.",
|
||||||
"OVERRIDE": "Specify whether to override the resources at the destination if a resource with the same name exists.",
|
"OVERRIDE": "Specify whether to override the resources at the destination if a resource with the same name exists.",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Specify whether to skip execution until the previous active execution finishes, avoiding the execution of the same replication rules multiple times in parallel.",
|
||||||
"EMAIL": "Email should be a valid email address like name@example.com.",
|
"EMAIL": "Email should be a valid email address like name@example.com.",
|
||||||
"USER_NAME": "Cannot contain special characters and maximum length should be 255 characters.",
|
"USER_NAME": "Cannot contain special characters and maximum length should be 255 characters.",
|
||||||
"FULL_NAME": "Maximum length should be 20 characters.",
|
"FULL_NAME": "Maximum length should be 20 characters.",
|
||||||
|
@ -578,6 +579,7 @@
|
||||||
"ALLOWED_CHARACTERS": "Allowed special characters",
|
"ALLOWED_CHARACTERS": "Allowed special characters",
|
||||||
"TOTAL": "Total",
|
"TOTAL": "Total",
|
||||||
"OVERRIDE": "Override",
|
"OVERRIDE": "Override",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Single active replication",
|
||||||
"ENABLED_RULE": "Enable rule",
|
"ENABLED_RULE": "Enable rule",
|
||||||
"OVERRIDE_INFO": "Override",
|
"OVERRIDE_INFO": "Override",
|
||||||
"OPERATION": "Operation",
|
"OPERATION": "Operation",
|
||||||
|
|
|
@ -76,6 +76,7 @@
|
||||||
"PULL_BASED": "Pull de recursos del remote registry al local Harbor.",
|
"PULL_BASED": "Pull de recursos del remote registry al local Harbor.",
|
||||||
"DESTINATION_NAMESPACE": "Especificar el namespace de destino. Si esta vacio, los recursos se colocan en el mismo namespace del recurso.",
|
"DESTINATION_NAMESPACE": "Especificar el namespace de destino. Si esta vacio, los recursos se colocan en el mismo namespace del recurso.",
|
||||||
"OVERRIDE": "Especifique si desea anular los recursos en el destino si existe un recurso con el mismo nombre.",
|
"OVERRIDE": "Especifique si desea anular los recursos en el destino si existe un recurso con el mismo nombre.",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Specify whether to skip execution until the previous active execution finishes, avoiding the execution of the same replication rules multiple times in parallel.",
|
||||||
"EMAIL": "El email debe ser una dirección válida como nombre@ejemplo.com.",
|
"EMAIL": "El email debe ser una dirección válida como nombre@ejemplo.com.",
|
||||||
"USER_NAME": "Debe tener una longitud máxima de 255 caracteres y no puede contener caracteres especiales.",
|
"USER_NAME": "Debe tener una longitud máxima de 255 caracteres y no puede contener caracteres especiales.",
|
||||||
"FULL_NAME": "La longitud máxima debería ser de 20 caracteres.",
|
"FULL_NAME": "La longitud máxima debería ser de 20 caracteres.",
|
||||||
|
@ -578,6 +579,7 @@
|
||||||
"ALLOWED_CHARACTERS": "Caracteres Especiales Permitidos",
|
"ALLOWED_CHARACTERS": "Caracteres Especiales Permitidos",
|
||||||
"TOTAL": "Total",
|
"TOTAL": "Total",
|
||||||
"OVERRIDE": "Sobreescribir",
|
"OVERRIDE": "Sobreescribir",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Single active replication",
|
||||||
"ENABLED_RULE": "Activar regla",
|
"ENABLED_RULE": "Activar regla",
|
||||||
"OVERRIDE_INFO": "Sobreescribir",
|
"OVERRIDE_INFO": "Sobreescribir",
|
||||||
"CURRENT": "Actual",
|
"CURRENT": "Actual",
|
||||||
|
|
|
@ -76,6 +76,7 @@
|
||||||
"PULL_BASED": "Pull les ressources du registre distant vers le Harbor local.",
|
"PULL_BASED": "Pull les ressources du registre distant vers le Harbor local.",
|
||||||
"DESTINATION_NAMESPACE": "Spécifier l'espace de nom de destination. Si vide, les ressources seront placées sous le même espace de nom que la source.",
|
"DESTINATION_NAMESPACE": "Spécifier l'espace de nom de destination. Si vide, les ressources seront placées sous le même espace de nom que la source.",
|
||||||
"OVERRIDE": "Spécifier s'il faut remplacer les ressources dans la destination si une ressource avec le même nom existe.",
|
"OVERRIDE": "Spécifier s'il faut remplacer les ressources dans la destination si une ressource avec le même nom existe.",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Specify whether to skip execution until the previous active execution finishes, avoiding the execution of the same replication rules multiple times in parallel.",
|
||||||
"EMAIL": "L'e-mail doit être une adresse e-mail valide comme name@example.com.",
|
"EMAIL": "L'e-mail doit être une adresse e-mail valide comme name@example.com.",
|
||||||
"USER_NAME": "Ne peut pas contenir de caractères spéciaux et la longueur maximale est de 255 caractères.",
|
"USER_NAME": "Ne peut pas contenir de caractères spéciaux et la longueur maximale est de 255 caractères.",
|
||||||
"FULL_NAME": "La longueur maximale est de 20 caractères.",
|
"FULL_NAME": "La longueur maximale est de 20 caractères.",
|
||||||
|
@ -578,6 +579,7 @@
|
||||||
"ALLOWED_CHARACTERS": "Caractères spéciaux autorisés",
|
"ALLOWED_CHARACTERS": "Caractères spéciaux autorisés",
|
||||||
"TOTAL": "Total",
|
"TOTAL": "Total",
|
||||||
"OVERRIDE": "Surcharger",
|
"OVERRIDE": "Surcharger",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Single active replication",
|
||||||
"ENABLED_RULE": "Activer la règle",
|
"ENABLED_RULE": "Activer la règle",
|
||||||
"OVERRIDE_INFO": "Surcharger",
|
"OVERRIDE_INFO": "Surcharger",
|
||||||
"OPERATION": "Opération",
|
"OPERATION": "Opération",
|
||||||
|
|
|
@ -76,6 +76,7 @@
|
||||||
"PULL_BASED": "원격 레지스트리의 리소스를 로컬 'Harbor'로 가져옵니다.",
|
"PULL_BASED": "원격 레지스트리의 리소스를 로컬 'Harbor'로 가져옵니다.",
|
||||||
"DESTINATION_NAMESPACE": "대상 네임스페이스를 지정합니다. 비어 있으면 리소스는 소스와 동일한 네임스페이스에 배치됩니다.",
|
"DESTINATION_NAMESPACE": "대상 네임스페이스를 지정합니다. 비어 있으면 리소스는 소스와 동일한 네임스페이스에 배치됩니다.",
|
||||||
"OVERRIDE": "동일한 이름의 리소스가 있는 경우 대상의 리소스를 재정의할지 여부를 지정합니다.",
|
"OVERRIDE": "동일한 이름의 리소스가 있는 경우 대상의 리소스를 재정의할지 여부를 지정합니다.",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Specify whether to skip execution until the previous active execution finishes, avoiding the execution of the same replication rules multiple times in parallel.",
|
||||||
"EMAIL": "이메일은 name@example.com과 같은 유효한 이메일 주소여야 합니다.",
|
"EMAIL": "이메일은 name@example.com과 같은 유효한 이메일 주소여야 합니다.",
|
||||||
"USER_NAME": "특수 문자를 포함할 수 없으며 최대 길이는 255자입니다.",
|
"USER_NAME": "특수 문자를 포함할 수 없으며 최대 길이는 255자입니다.",
|
||||||
"FULL_NAME": "최대 길이는 20자입니다.",
|
"FULL_NAME": "최대 길이는 20자입니다.",
|
||||||
|
@ -575,6 +576,7 @@
|
||||||
"ALLOWED_CHARACTERS": "허용되는 특수 문자",
|
"ALLOWED_CHARACTERS": "허용되는 특수 문자",
|
||||||
"TOTAL": "총",
|
"TOTAL": "총",
|
||||||
"OVERRIDE": "Override",
|
"OVERRIDE": "Override",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Single active replication",
|
||||||
"ENABLED_RULE": "규칙 활성화",
|
"ENABLED_RULE": "규칙 활성화",
|
||||||
"OVERRIDE_INFO": "Override",
|
"OVERRIDE_INFO": "Override",
|
||||||
"OPERATION": "작업",
|
"OPERATION": "작업",
|
||||||
|
|
|
@ -76,6 +76,7 @@
|
||||||
"PULL_BASED": "Trazer recursos do repositório remoto para o Harbor local.",
|
"PULL_BASED": "Trazer recursos do repositório remoto para o Harbor local.",
|
||||||
"DESTINATION_NAMESPACE": "Especificar o namespace de destino. Se vazio, os recursos serão colocados no mesmo namespace que a fonte.",
|
"DESTINATION_NAMESPACE": "Especificar o namespace de destino. Se vazio, os recursos serão colocados no mesmo namespace que a fonte.",
|
||||||
"OVERRIDE": "Sobrescrever recursos no destino se já existir com o mesmo nome.",
|
"OVERRIDE": "Sobrescrever recursos no destino se já existir com o mesmo nome.",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Specify whether to skip execution until the previous active execution finishes, avoiding the execution of the same replication rules multiple times in parallel.",
|
||||||
"EMAIL": "Deve ser um endereço de e-mail válido como nome@exemplo.com.",
|
"EMAIL": "Deve ser um endereço de e-mail válido como nome@exemplo.com.",
|
||||||
"USER_NAME": "Não pode conter caracteres especiais. Tamanho máximo de 255 caracteres.",
|
"USER_NAME": "Não pode conter caracteres especiais. Tamanho máximo de 255 caracteres.",
|
||||||
"FULL_NAME": "Tamanho máximo de 20 caracteres.",
|
"FULL_NAME": "Tamanho máximo de 20 caracteres.",
|
||||||
|
@ -576,6 +577,7 @@
|
||||||
"ALLOWED_CHARACTERS": "Símbolos permitidos",
|
"ALLOWED_CHARACTERS": "Símbolos permitidos",
|
||||||
"TOTAL": "Total",
|
"TOTAL": "Total",
|
||||||
"OVERRIDE": "Sobrescrever",
|
"OVERRIDE": "Sobrescrever",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Single active replication",
|
||||||
"ENABLED_RULE": "Habiltar regra",
|
"ENABLED_RULE": "Habiltar regra",
|
||||||
"OVERRIDE_INFO": "Sobrescrever",
|
"OVERRIDE_INFO": "Sobrescrever",
|
||||||
"CURRENT": "atual",
|
"CURRENT": "atual",
|
||||||
|
|
|
@ -76,6 +76,7 @@
|
||||||
"PULL_BASED": "Kaynakları uzak kayıt defterinden yerel Harbora çekin.",
|
"PULL_BASED": "Kaynakları uzak kayıt defterinden yerel Harbora çekin.",
|
||||||
"DESTINATION_NAMESPACE": "Hedef ad alanını belirtin. Boşsa, kaynaklar, kaynak ile aynı ad alanına yerleştirilir.",
|
"DESTINATION_NAMESPACE": "Hedef ad alanını belirtin. Boşsa, kaynaklar, kaynak ile aynı ad alanına yerleştirilir.",
|
||||||
"OVERRIDE": "Aynı adı taşıyan bir kaynak varsa, hedefteki kaynakları geçersiz kılmayacağınızı belirtin.",
|
"OVERRIDE": "Aynı adı taşıyan bir kaynak varsa, hedefteki kaynakları geçersiz kılmayacağınızı belirtin.",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Specify whether to skip execution until the previous active execution finishes, avoiding the execution of the same replication rules multiple times in parallel.",
|
||||||
"EMAIL": "E-posta, ad@example.com gibi geçerli bir e-posta adresi olmalıdır.",
|
"EMAIL": "E-posta, ad@example.com gibi geçerli bir e-posta adresi olmalıdır.",
|
||||||
"USER_NAME": "Özel karakterler içeremez ve maksimum uzunluk 255 karakter olmalıdır.",
|
"USER_NAME": "Özel karakterler içeremez ve maksimum uzunluk 255 karakter olmalıdır.",
|
||||||
"FULL_NAME": "Maksimum uzunluk 20 karakter olmalıdır.",
|
"FULL_NAME": "Maksimum uzunluk 20 karakter olmalıdır.",
|
||||||
|
@ -579,6 +580,7 @@
|
||||||
"ALLOWED_CHARACTERS": "İzin verilen özel karakterler",
|
"ALLOWED_CHARACTERS": "İzin verilen özel karakterler",
|
||||||
"TOTAL": "Toplam",
|
"TOTAL": "Toplam",
|
||||||
"OVERRIDE": "Geçersiz Kıl",
|
"OVERRIDE": "Geçersiz Kıl",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Single active replication",
|
||||||
"ENABLED_RULE": "Kuralı etkinleştir",
|
"ENABLED_RULE": "Kuralı etkinleştir",
|
||||||
"OVERRIDE_INFO": "Geçersiz Kıl",
|
"OVERRIDE_INFO": "Geçersiz Kıl",
|
||||||
"OPERATION": "Operasyon",
|
"OPERATION": "Operasyon",
|
||||||
|
|
|
@ -76,6 +76,7 @@
|
||||||
"PULL_BASED": "把资源由远端仓库拉取到本地Harbor。",
|
"PULL_BASED": "把资源由远端仓库拉取到本地Harbor。",
|
||||||
"DESTINATION_NAMESPACE": "指定目标名称空间。如果不填,资源会被放到和源相同的名称空间下。",
|
"DESTINATION_NAMESPACE": "指定目标名称空间。如果不填,资源会被放到和源相同的名称空间下。",
|
||||||
"OVERRIDE": "如果存在具有相同名称的资源,请指定是否覆盖目标上的资源。",
|
"OVERRIDE": "如果存在具有相同名称的资源,请指定是否覆盖目标上的资源。",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Specify whether to skip execution until the previous active execution finishes, avoiding the execution of the same replication rules multiple times in parallel.",
|
||||||
"EMAIL": "请使用正确的邮箱地址,比如name@example.com。",
|
"EMAIL": "请使用正确的邮箱地址,比如name@example.com。",
|
||||||
"USER_NAME": "不能包含特殊字符且长度不能超过255。",
|
"USER_NAME": "不能包含特殊字符且长度不能超过255。",
|
||||||
"FULL_NAME": "长度不能超过20。",
|
"FULL_NAME": "长度不能超过20。",
|
||||||
|
@ -576,6 +577,7 @@
|
||||||
"ALLOWED_CHARACTERS": "允许的特殊字符",
|
"ALLOWED_CHARACTERS": "允许的特殊字符",
|
||||||
"TOTAL": "总数",
|
"TOTAL": "总数",
|
||||||
"OVERRIDE": "覆盖",
|
"OVERRIDE": "覆盖",
|
||||||
|
"SINGLE_ACTIVE_REPLICATION": "Single active replication",
|
||||||
"ENABLED_RULE": "启用规则",
|
"ENABLED_RULE": "启用规则",
|
||||||
"OVERRIDE_INFO": "覆盖",
|
"OVERRIDE_INFO": "覆盖",
|
||||||
"CURRENT": "当前仓库",
|
"CURRENT": "当前仓库",
|
||||||
|
|
|
@ -113,6 +113,14 @@ func (r *replicationAPI) CreateReplicationPolicy(ctx context.Context, params ope
|
||||||
policy.CopyByChunk = *params.Policy.CopyByChunk
|
policy.CopyByChunk = *params.Policy.CopyByChunk
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if params.Policy.SingleActiveReplication != nil {
|
||||||
|
// Validate and assign SingleActiveReplication only for non-event_based triggers
|
||||||
|
if params.Policy.Trigger != nil && params.Policy.Trigger.Type == model.TriggerTypeEventBased && *params.Policy.SingleActiveReplication {
|
||||||
|
return r.SendError(ctx, fmt.Errorf("single active replication is not allowed for event_based triggers"))
|
||||||
|
}
|
||||||
|
policy.SingleActiveReplication = *params.Policy.SingleActiveReplication
|
||||||
|
}
|
||||||
|
|
||||||
id, err := r.ctl.CreatePolicy(ctx, policy)
|
id, err := r.ctl.CreatePolicy(ctx, policy)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return r.SendError(ctx, err)
|
return r.SendError(ctx, err)
|
||||||
|
@ -181,6 +189,14 @@ func (r *replicationAPI) UpdateReplicationPolicy(ctx context.Context, params ope
|
||||||
policy.CopyByChunk = *params.Policy.CopyByChunk
|
policy.CopyByChunk = *params.Policy.CopyByChunk
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if params.Policy.SingleActiveReplication != nil {
|
||||||
|
// Validate and assign SingleActiveReplication only for non-event_based triggers
|
||||||
|
if params.Policy.Trigger != nil && params.Policy.Trigger.Type == model.TriggerTypeEventBased && *params.Policy.SingleActiveReplication {
|
||||||
|
return r.SendError(ctx, fmt.Errorf("single active replication is not allowed for event_based triggers"))
|
||||||
|
}
|
||||||
|
policy.SingleActiveReplication = *params.Policy.SingleActiveReplication
|
||||||
|
}
|
||||||
|
|
||||||
if err := r.ctl.UpdatePolicy(ctx, policy); err != nil {
|
if err := r.ctl.UpdatePolicy(ctx, policy); err != nil {
|
||||||
return r.SendError(ctx, err)
|
return r.SendError(ctx, err)
|
||||||
}
|
}
|
||||||
|
@ -446,6 +462,7 @@ func convertReplicationPolicy(policy *repctlmodel.Policy) *models.ReplicationPol
|
||||||
Speed: &policy.Speed,
|
Speed: &policy.Speed,
|
||||||
UpdateTime: strfmt.DateTime(policy.UpdateTime),
|
UpdateTime: strfmt.DateTime(policy.UpdateTime),
|
||||||
CopyByChunk: &policy.CopyByChunk,
|
CopyByChunk: &policy.CopyByChunk,
|
||||||
|
SingleActiveReplication: &policy.SingleActiveReplication,
|
||||||
}
|
}
|
||||||
if policy.SrcRegistry != nil {
|
if policy.SrcRegistry != nil {
|
||||||
p.SrcRegistry = convertRegistry(policy.SrcRegistry)
|
p.SrcRegistry = convertRegistry(policy.SrcRegistry)
|
||||||
|
|
|
@ -151,7 +151,8 @@ replication_policy_payload = {
|
||||||
"deletion": False,
|
"deletion": False,
|
||||||
"override": True,
|
"override": True,
|
||||||
"speed": -1,
|
"speed": -1,
|
||||||
"copy_by_chunk": False
|
"copy_by_chunk": False,
|
||||||
|
"single_active_replication": False
|
||||||
}
|
}
|
||||||
create_replication_policy = Permission("{}/replication/policies".format(harbor_base_url), "POST", 201, replication_policy_payload, "id", id_from_header=True)
|
create_replication_policy = Permission("{}/replication/policies".format(harbor_base_url), "POST", 201, replication_policy_payload, "id", id_from_header=True)
|
||||||
list_replication_policy = Permission("{}/replication/policies".format(harbor_base_url), "GET", 200, replication_policy_payload)
|
list_replication_policy = Permission("{}/replication/policies".format(harbor_base_url), "GET", 200, replication_policy_payload)
|
||||||
|
@ -204,7 +205,8 @@ if "replication" in resources or "all" == resources:
|
||||||
"deletion": False,
|
"deletion": False,
|
||||||
"override": True,
|
"override": True,
|
||||||
"speed": -1,
|
"speed": -1,
|
||||||
"copy_by_chunk": False
|
"copy_by_chunk": False,
|
||||||
|
"single_active_replication": False
|
||||||
}
|
}
|
||||||
response = requests.post("{}/replication/policies".format(harbor_base_url), data=json.dumps(replication_policy_payload), verify=False, auth=(admin_user_name, admin_password), headers={"Content-Type": "application/json"})
|
response = requests.post("{}/replication/policies".format(harbor_base_url), data=json.dumps(replication_policy_payload), verify=False, auth=(admin_user_name, admin_password), headers={"Content-Type": "application/json"})
|
||||||
replication_policy_id = int(response.headers["Location"].split("/")[-1])
|
replication_policy_id = int(response.headers["Location"].split("/")[-1])
|
||||||
|
|
|
@ -36,19 +36,9 @@ else
|
||||||
rc=999
|
rc=999
|
||||||
fi
|
fi
|
||||||
rc=$?
|
rc=$?
|
||||||
## --------------------------------------------- Upload Harbor CI Logs -------------------------------------------
|
## --------------------------------------------- Package Harbor CI Logs -------------------------------------------
|
||||||
#timestamp=$(date +%s)
|
outfile="integration_logs.tar.gz"
|
||||||
#GIT_COMMIT=$(git rev-parse --short "$GITHUB_SHA")
|
sudo tar -zcvf $outfile output.xml log.html /var/log/harbor/*
|
||||||
#outfile="integration_logs_$timestamp$GIT_COMMIT.tar.gz"
|
pwd
|
||||||
#sudo tar -zcvf $outfile output.xml log.html /var/log/harbor/*
|
ls -lh $outfile
|
||||||
#if [ -f "$outfile" ]; then
|
|
||||||
# uploader $outfile $harbor_logs_bucket
|
|
||||||
# echo "----------------------------------------------"
|
|
||||||
# echo "Download test logs:"
|
|
||||||
# echo "https://storage.googleapis.com/harbor-ci-logs/$outfile"
|
|
||||||
# echo "----------------------------------------------"
|
|
||||||
#else
|
|
||||||
# echo "No log output file to upload"
|
|
||||||
#fi
|
|
||||||
|
|
||||||
exit $rc
|
exit $rc
|
||||||
|
|
|
@ -3,5 +3,5 @@ set -x
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
sudo make package_online GOBUILDTAGS="include_oss include_gcs" VERSIONTAG=dev-gitaction PKGVERSIONTAG=dev-gitaction UIVERSIONTAG=dev-gitaction GOBUILDIMAGE=golang:1.24.5 COMPILETAG=compile_golangimage TRIVYFLAG=true EXPORTERFLAG=true HTTPPROXY= PULL_BASE_FROM_DOCKERHUB=false
|
sudo make package_online GOBUILDTAGS="include_oss include_gcs" VERSIONTAG=dev-gitaction PKGVERSIONTAG=dev-gitaction UIVERSIONTAG=dev-gitaction GOBUILDIMAGE=golang:1.24.6 COMPILETAG=compile_golangimage TRIVYFLAG=true EXPORTERFLAG=true HTTPPROXY= PULL_BASE_FROM_DOCKERHUB=false
|
||||||
sudo make package_offline GOBUILDTAGS="include_oss include_gcs" VERSIONTAG=dev-gitaction PKGVERSIONTAG=dev-gitaction UIVERSIONTAG=dev-gitaction GOBUILDIMAGE=golang:1.24.5 COMPILETAG=compile_golangimage TRIVYFLAG=true EXPORTERFLAG=true HTTPPROXY= PULL_BASE_FROM_DOCKERHUB=false
|
sudo make package_offline GOBUILDTAGS="include_oss include_gcs" VERSIONTAG=dev-gitaction PKGVERSIONTAG=dev-gitaction UIVERSIONTAG=dev-gitaction GOBUILDIMAGE=golang:1.24.6 COMPILETAG=compile_golangimage TRIVYFLAG=true EXPORTERFLAG=true HTTPPROXY= PULL_BASE_FROM_DOCKERHUB=false
|
||||||
|
|
Loading…
Reference in New Issue