mirror of https://github.com/goharbor/harbor.git
Merge branch 'main' into arm64-ci-support
commit 7c7915b625

 README.md | 10
@@ -1,15 +1,13 @@
 # Harbor
-[](https://github.com/goharbor/harbor/actions?query=event%3Apush+branch%3Amain+workflow%3ACI+)
-[](https://codecov.io/gh/goharbor/harbor)
+[](https://github.com/goharbor/harbor/actions/workflows/CI.yml)
 [](https://goreportcard.com/report/github.com/goharbor/harbor)
+[](https://codecov.io/gh/goharbor/harbor)
 [](https://bestpractices.coreinfrastructure.org/projects/2095)
 [](https://www.codacy.com/gh/goharbor/harbor/dashboard?utm_source=github.com&utm_medium=referral&utm_content=goharbor/harbor&utm_campaign=Badge_Grade)
 
-[](https://www.googleapis.com/storage/v1/b/harbor-nightly/o)
-
+
 [](https://app.fossa.com/projects/git%2Bgithub.com%2Fgoharbor%2Fharbor?ref=badge_shield)
 [](https://artifacthub.io/packages/helm/harbor/harbor)
 </br>
 
 |Community Meeting|
 
@@ -7321,6 +7321,10 @@ definitions:
         type: string
         description: 'The bandwidth limit of proxy cache, in Kbps (kilobits per second). It limits the communication between Harbor and the upstream registry, not the client and the Harbor.'
         x-nullable: true
+      max_upstream_conn:
+        type: string
+        description: 'The max connection per artifact to the upstream registry in current proxy cache project, if it is -1, no limit to upstream registry connections'
+        x-nullable: true
   ProjectSummary:
     type: object
     properties:
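As a rough usage sketch (not part of this commit): the new field is set like any other project metadata when creating a proxy-cache project, and metadata values are strings per the swagger definition above. The endpoint shape follows Harbor's v2.0 projects API; the host, credentials, and registry ID below are placeholders.

// Sketch: create a proxy-cache project that carries the new
// max_upstream_conn metadata. Host, credentials, and registry_id are
// illustrative only.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	body := []byte(`{
		"project_name": "proxy-cache-demo",
		"registry_id": 1,
		"metadata": {
			"public": "false",
			"max_upstream_conn": "10"
		}
	}`)
	req, err := http.NewRequest(http.MethodPost,
		"https://harbor.example.com/api/v2.0/projects", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth("admin", "password") // placeholder credentials
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 201 Created
}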
@@ -77,6 +77,7 @@ type controller struct {
 func (c *controller) Start(ctx context.Context, policy Policy, trigger string) (int64, error) {
 	para := make(map[string]any)
 	para["delete_untagged"] = policy.DeleteUntagged
+	para["delete_tag"] = policy.DeleteTag
 	para["dry_run"] = policy.DryRun
 	para["workers"] = policy.Workers
 	para["redis_url_reg"] = policy.ExtraAttrs["redis_url_reg"]

@@ -205,6 +206,7 @@ func (c *controller) GetSchedule(ctx context.Context) (*scheduler.Schedule, erro
 func (c *controller) CreateSchedule(ctx context.Context, cronType, cron string, policy Policy) (int64, error) {
 	extras := make(map[string]any)
 	extras["delete_untagged"] = policy.DeleteUntagged
+	extras["delete_tag"] = policy.DeleteTag
 	extras["workers"] = policy.Workers
 	return c.schedulerMgr.Schedule(ctx, job.GarbageCollectionVendorType, -1, cronType, cron, job.GarbageCollectionVendorType, policy, extras)
 }

@@ -234,6 +236,7 @@ func convertTask(task *task.Task) *Task {
 		StatusMessage:  task.StatusMessage,
 		RunCount:       task.RunCount,
 		DeleteUntagged: task.GetBoolFromExtraAttrs("delete_untagged"),
+		DeleteTag:      task.GetBoolFromExtraAttrs("delete_tag"),
 		DryRun:         task.GetBoolFromExtraAttrs("dry_run"),
 		Workers:        int(task.GetNumFromExtraAttrs("workers")),
 		JobID:          task.JobID,

@@ -41,6 +41,7 @@ func (g *gcCtrTestSuite) TestStart() {
 	dataMap := make(map[string]any)
 	p := Policy{
 		DeleteUntagged: true,
+		DeleteTag:      true,
 		ExtraAttrs:     dataMap,
 	}
 	id, err := g.ctl.Start(nil, p, task.ExecutionTriggerManual)

@@ -149,6 +150,7 @@ func (g *gcCtrTestSuite) TestCreateSchedule() {
 	dataMap := make(map[string]any)
 	p := Policy{
 		DeleteUntagged: true,
+		DeleteTag:      true,
 		ExtraAttrs:     dataMap,
 		Workers:        3,
 	}

@@ -22,6 +22,7 @@ import (
 type Policy struct {
 	Trigger        *Trigger       `json:"trigger"`
 	DeleteUntagged bool           `json:"deleteuntagged"`
+	DeleteTag      bool           `json:"deletetag"`
 	DryRun         bool           `json:"dryrun"`
 	Workers        int            `json:"workers"`
 	ExtraAttrs     map[string]any `json:"extra_attrs"`
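A small aside on the wire format: the Policy struct's JSON tags use no underscores (deleteuntagged, deletetag), unlike the snake_case job-parameter keys (delete_untagged, delete_tag) used elsewhere in this commit. A minimal sketch with a trimmed-down copy of the struct shows what the tags actually produce:

// Sketch: marshal a stripped-down copy of the GC Policy struct to show
// the JSON emitted by its tags (note: no underscores, unlike the job
// parameter keys such as "delete_tag").
package main

import (
	"encoding/json"
	"fmt"
)

type policy struct {
	DeleteUntagged bool `json:"deleteuntagged"`
	DeleteTag      bool `json:"deletetag"`
	DryRun         bool `json:"dryrun"`
	Workers        int  `json:"workers"`
}

func main() {
	b, _ := json.Marshal(policy{DeleteUntagged: true, DeleteTag: true, Workers: 3})
	fmt.Println(string(b))
	// {"deleteuntagged":true,"deletetag":true,"dryrun":false,"workers":3}
}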
@@ -60,6 +61,7 @@ type Task struct {
 	StatusMessage  string
 	RunCount       int32
 	DeleteUntagged bool
+	DeleteTag      bool
 	DryRun         bool
 	Workers        int
 	JobID          string

@@ -64,6 +64,7 @@ type GarbageCollector struct {
 	logger         logger.Interface
 	redisURL       string
 	deleteUntagged bool
+	deleteTag      bool
 	dryRun         bool
 	// holds all of trashed artifacts' digest and repositories.
 	// The source data of trashedArts is the table ArtifactTrash and it's only used as a dictionary by sweep when to delete a manifest.

@@ -130,6 +131,12 @@ func (gc *GarbageCollector) parseParams(params job.Parameters) {
 		}
 	}
 
+	// delete tag: default is to delete the tag in the backend storage
+	gc.deleteTag = true
+	if deleteTag, ok := params["delete_tag"].(bool); ok {
+		gc.deleteTag = deleteTag
+	}
+
 	// time window: default is 2 hours, and for testing/debugging, it can be set to 0.
 	gc.timeWindowHours = 2
 	timeWindow, exist := params["time_window"]
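The comma-ok type assertion above is what gives delete_tag its default semantics: the preset value survives both a missing key and a wrongly typed value, which is why the test suite later in this diff expects deleteTag to remain true when the parameter is the string "unsupported". A minimal, self-contained sketch of the pattern:

// Sketch: the comma-ok assertion pattern used by parseParams. The
// default survives both a missing key and a wrongly typed value.
package main

import "fmt"

func main() {
	for _, params := range []map[string]any{
		{"delete_tag": false},         // explicit override
		{"delete_tag": "unsupported"}, // wrong type: default kept
		{},                            // missing key: default kept
	} {
		deleteTag := true // default is to delete the tag in the backend storage
		if v, ok := params["delete_tag"].(bool); ok {
			deleteTag = v
		}
		fmt.Println(deleteTag) // prints: false, true, true
	}
}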
@@ -159,8 +166,8 @@ func (gc *GarbageCollector) parseParams(params job.Parameters) {
 		}
 	}
 
-	gc.logger.Infof("Garbage Collection parameters: [delete_untagged: %t, dry_run: %t, time_window: %d, workers: %d]",
-		gc.deleteUntagged, gc.dryRun, gc.timeWindowHours, gc.workers)
+	gc.logger.Infof("Garbage Collection parameters: [delete_untagged: %t, delete_tag: %t, dry_run: %t, time_window: %d, workers: %d]",
+		gc.deleteUntagged, gc.deleteTag, gc.dryRun, gc.timeWindowHours, gc.workers)
 }
 
 // Run implements the interface in job/Interface
@@ -332,34 +339,37 @@ func (gc *GarbageCollector) sweep(ctx job.Context) error {
 			skippedBlob := false
 			if _, exist := gc.trashedArts[blob.Digest]; exist && blob.IsManifest() {
 				for _, art := range gc.trashedArts[blob.Digest] {
-					// Harbor cannot know the existing tags in the backend from its database, so let the v2 DELETE manifest to remove all of them.
-					gc.logger.Infof("[%s][%d/%d] delete the manifest with registry v2 API: %s, %s, %s",
-						uid, localIndex, total, art.RepositoryName, blob.ContentType, blob.Digest)
-					if err := retry.Retry(func() error {
-						return ignoreNotFound(func() error {
-							err := v2DeleteManifest(art.RepositoryName, blob.Digest)
-							// if the system is in read-only mode, return an Abort error to skip retrying
-							if err == readonly.Err {
-								return retry.Abort(err)
-							}
-							return err
-						})
-					}, retry.Callback(func(err error, sleep time.Duration) {
-						gc.logger.Infof("[%s][%d/%d] failed to exec v2DeleteManifest, error: %v, will retry again after: %s", uid, localIndex, total, err, sleep)
-					})); err != nil {
-						gc.logger.Errorf("[%s][%d/%d] failed to delete manifest with v2 API, %s, %s, %v", uid, localIndex, total, art.RepositoryName, blob.Digest, err)
-						if err := ignoreNotFound(func() error {
-							return gc.markDeleteFailed(ctx, blob)
-						}); err != nil {
-							gc.logger.Errorf("[%s][%d/%d] failed to call gc.markDeleteFailed() after v2DeleteManifest() error out: %s, %v", uid, localIndex, total, blob.Digest, err)
-							return err
-						}
-						// if the system is set to read-only mode, return directly
-						if err == readonly.Err {
-							return err
-						}
-						skippedBlob = true
-						continue
+					// if the deleteTag is enabled, call the distribution api to perform the tag deletion.
+					if gc.deleteTag {
+						// Harbor cannot know the existing tags in the backend from its database, so let the v2 DELETE manifest to remove all of them.
+						gc.logger.Infof("[%s][%d/%d] delete the manifest with registry v2 API: %s, %s, %s",
+							uid, localIndex, total, art.RepositoryName, blob.ContentType, blob.Digest)
+						if err := retry.Retry(func() error {
+							return ignoreNotFound(func() error {
+								err := v2DeleteManifest(art.RepositoryName, blob.Digest)
+								// if the system is in read-only mode, return an Abort error to skip retrying
+								if err == readonly.Err {
+									return retry.Abort(err)
+								}
+								return err
+							})
+						}, retry.Callback(func(err error, sleep time.Duration) {
+							gc.logger.Infof("[%s][%d/%d] failed to exec v2DeleteManifest, error: %v, will retry again after: %s", uid, localIndex, total, err, sleep)
+						})); err != nil {
+							gc.logger.Errorf("[%s][%d/%d] failed to delete manifest with v2 API, %s, %s, %v", uid, localIndex, total, art.RepositoryName, blob.Digest, err)
+							if err := ignoreNotFound(func() error {
+								return gc.markDeleteFailed(ctx, blob)
+							}); err != nil {
+								gc.logger.Errorf("[%s][%d/%d] failed to call gc.markDeleteFailed() after v2DeleteManifest() error out: %s, %v", uid, localIndex, total, blob.Digest, err)
+								return err
+							}
+							// if the system is set to read-only mode, return directly
+							if err == readonly.Err {
+								return err
+							}
+							skippedBlob = true
+							continue
+						}
 					}
 					// for manifest, it has to delete the revisions folder of each repository
 					gc.logger.Infof("[%s][%d/%d] delete manifest from storage: %s", uid, localIndex, total, blob.Digest)
@@ -159,6 +159,7 @@ func (suite *gcTestSuite) TestInit() {
 	}
 	params := map[string]any{
 		"delete_untagged": true,
+		"delete_tag":      true,
 		"redis_url_reg":   "redis url",
 		"time_window":     1,
 		"workers":         float64(3),

@@ -167,27 +168,33 @@ func (suite *gcTestSuite) TestInit() {
 	mock.OnAnything(gc.registryCtlClient, "Health").Return(nil)
 	suite.Nil(gc.init(ctx, params))
 	suite.True(gc.deleteUntagged)
+	suite.True(gc.deleteTag)
 	suite.Equal(3, gc.workers)
 
 	params = map[string]any{
 		"delete_untagged": "unsupported",
+		"delete_tag":      "unsupported",
 		"redis_url_reg":   "redis url",
 	}
 	suite.Nil(gc.init(ctx, params))
 	suite.True(gc.deleteUntagged)
+	suite.True(gc.deleteTag)
 
 	params = map[string]any{
 		"delete_untagged": false,
+		"delete_tag":      false,
 		"redis_url_reg":   "redis url",
 	}
 	suite.Nil(gc.init(ctx, params))
 	suite.False(gc.deleteUntagged)
+	suite.False(gc.deleteTag)
 
 	params = map[string]any{
 		"redis_url_reg": "redis url",
 	}
 	suite.Nil(gc.init(ctx, params))
 	suite.True(gc.deleteUntagged)
+	suite.True(gc.deleteTag)
 }
 
 func (suite *gcTestSuite) TestStop() {
@@ -210,6 +217,7 @@ func (suite *gcTestSuite) TestStop() {
 		registryCtlClient: suite.registryCtlClient,
 		artCtl:            suite.artifactCtl,
 		deleteUntagged:    true,
+		deleteTag:         true,
 	}
 
 	suite.Equal(errGcStop, gc.mark(ctx))
@@ -25,4 +25,5 @@ const (
 	ProMetaReuseSysCVEAllowlist = "reuse_sys_cve_allowlist"
 	ProMetaAutoSBOMGen          = "auto_sbom_generation"
 	ProMetaProxySpeed           = "proxy_speed_kb"
+	ProMetaMaxUpstreamConn      = "max_upstream_conn"
 )
@@ -21,6 +21,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/goharbor/harbor/src/lib/log"
 	"github.com/goharbor/harbor/src/lib/orm"
 	allowlist "github.com/goharbor/harbor/src/pkg/allowlist/models"
 )
@@ -169,6 +170,20 @@ func (p *Project) ProxyCacheSpeed() int32 {
 	return int32(speedInt)
 }
 
+// MaxUpstreamConnection ...
+func (p *Project) MaxUpstreamConnection() int {
+	countVal, exist := p.GetMetadata(ProMetaMaxUpstreamConn)
+	if !exist {
+		return 0
+	}
+	cnt, err := strconv.ParseInt(countVal, 10, 32)
+	if err != nil {
+		log.Warningf("failed to parse the max_upstream_conn, val:%s error %v", countVal, err)
+		return 0
+	}
+	return int(cnt)
+}
+
 // FilterByPublic returns orm.QuerySeter with public filter
 func (p *Project) FilterByPublic(_ context.Context, qs orm.QuerySeter, _ string, value any) orm.QuerySeter {
 	subQuery := `SELECT project_id FROM project_metadata WHERE name = 'public' AND value = '%s'`
@@ -0,0 +1,79 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package connection
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/go-redis/redis/v8"
+
+	"github.com/goharbor/harbor/src/lib/log"
+)
+
+// ConnLimiter is used to limit the number of connections to the upstream service
+type ConnLimiter struct {
+}
+
+// Limiter is a global connection limiter instance
+var Limiter = &ConnLimiter{}
+
+// Used to compare and increase connection number in redis
+//
+// KEYS[1]: key of max_conn_upstream
+// ARGV[1]: max connection limit
+var increaseWithLimitText = `
+local current = tonumber(redis.call('GET', KEYS[1]) or '0')
+local max = tonumber(ARGV[1])
+
+if current + 1 <= max then
+	redis.call('INCRBY', KEYS[1], 1)
+	redis.call('EXPIRE', KEYS[1], 3600) -- set expire to avoid always lock
+	return 1
+else
+	return 0
+end
+`
+
+var acquireScript = redis.NewScript(increaseWithLimitText)
+
+// Acquire tries to acquire a connection, returns true if successful
+func (c *ConnLimiter) Acquire(ctx context.Context, rdb *redis.Client, key string, limit int) bool {
+	result, err := acquireScript.Run(ctx, rdb, []string{key}, fmt.Sprintf("%v", limit)).Int()
+	if err != nil {
+		log.Errorf("failed to get the connection lock in redis, error %v", err)
+		return false
+	}
+	log.Debugf("Acquire script result is %d", result)
+	return result == 1
+}
+
+var decreaseText = `
+local val = tonumber(redis.call("GET", KEYS[1]) or "0")
+if val > 0 then
+	redis.call("DECR", KEYS[1])
+end
+return 0
+`
+
+var decreaseScript = redis.NewScript(decreaseText)
+
+// Release releases a connection in redis
+func (c *ConnLimiter) Release(ctx context.Context, rdb *redis.Client, key string) {
+	_, err := decreaseScript.Run(ctx, rdb, []string{key}).Int()
+	if err != nil {
+		log.Infof("release connection failed:%v", err)
+	}
+}
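A usage sketch for the new package (not part of the commit): acquire a slot before talking to the upstream registry, and release it with a fresh context so a canceled request cannot leak the counter. The Redis address and key below are placeholders; in Harbor proper the client comes from redis.GetHarborClient() and the key from upstreamRegistryConnectionKey(), both shown later in this diff.

// Sketch: guarding an upstream call with the new ConnLimiter.
package main

import (
	"context"
	"log"

	"github.com/go-redis/redis/v8"

	"github.com/goharbor/harbor/src/pkg/proxy/connection"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address
	key := "{upstream_registry_connection}:library/nginx:sha256:abc" // sample per-artifact key
	if !connection.Limiter.Acquire(ctx, rdb, key, 10) {
		log.Println("limit reached, reply 429 to the client")
		return
	}
	// release with a fresh context so cancellation of the request
	// context cannot leak the slot
	defer connection.Limiter.Release(context.Background(), rdb, key)
	// ... proxy the blob/manifest from the upstream registry ...
}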
@@ -0,0 +1,59 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package connection
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/go-redis/redis/v8"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestConnLimiter_Acquire_Release(t *testing.T) {
+	redisAddress := os.Getenv("REDIS_HOST")
+	redisHost := "localhost"
+	if len(redisAddress) > 0 {
+		redisHost = redisAddress
+	}
+
+	ctx := context.Background()
+	rdb := redis.NewClient(&redis.Options{
+		Addr:     fmt.Sprintf("%s:6379", redisHost), // Redis server address
+		Password: "",                                // No password set
+		DB:       0,                                 // Use default DB
+	})
+	key := "test_max_connection_key"
+	maxConn := 10
+	for range 10 {
+		result := Limiter.Acquire(ctx, rdb, key, maxConn)
+		assert.True(t, result)
+	}
+	// after max connection reached, it should be false
+	result2 := Limiter.Acquire(ctx, rdb, key, maxConn)
+	assert.False(t, result2)
+
+	for range 10 {
+		Limiter.Release(ctx, rdb, key)
+	}
+
+	// connection in redis should be 0 finally
+	n, err := rdb.Get(ctx, key).Int()
+	assert.Nil(t, err)
+	assert.Equal(t, 0, n)
+}
@@ -113,6 +113,24 @@
         </clr-toggle-container>
     </div>
 </div>
+<div class="clr-row">
+    <div class="clr-col-2 flex-200"></div>
+    <div class="clr-col">
+        <clr-toggle-container class="mt-05">
+            <clr-toggle-wrapper>
+                <input
+                    type="checkbox"
+                    clrToggle
+                    name="delete_tag"
+                    id="delete_tag"
+                    [(ngModel)]="shouldDeleteTag" />
+                <label class="font-weight-400" for="delete_tag">{{
+                    'GC.DELETE_TAG' | translate
+                }}</label>
+            </clr-toggle-wrapper>
+        </clr-toggle-container>
+    </div>
+</div>
 <div class="clr-row">
     <div class="clr-col-2 flex-200">
         <button
@@ -42,6 +42,7 @@ export class GcComponent implements OnInit, OnDestroy {
     @ViewChild(CronScheduleComponent)
     cronScheduleComponent: CronScheduleComponent;
     shouldDeleteUntagged: boolean;
+    shouldDeleteTag: boolean;
     workerNum: number = 1;
     workerOptions: number[] = clone(WORKER_OPTIONS);
     dryRunOnGoing: boolean = false;

@@ -60,6 +61,7 @@ export class GcComponent implements OnInit, OnDestroy {
     ngOnInit() {
        this.getCurrentSchedule(true);
        this.getStatus();
+       this.shouldDeleteTag = true;
    }
    ngOnDestroy() {
        if (this.statusTimeout) {

@@ -133,9 +135,13 @@ export class GcComponent implements OnInit, OnDestroy {
            this.shouldDeleteUntagged = JSON.parse(
                gcHistory.job_parameters
            ).delete_untagged;
+           this.shouldDeleteTag = JSON.parse(
+               gcHistory.job_parameters
+           ).delete_tag;
            this.workerNum = +JSON.parse(gcHistory.job_parameters).workers;
        } else {
            this.shouldDeleteUntagged = false;
+           this.shouldDeleteTag = false;
            this.workerNum = 1;
        }
    }
@@ -151,6 +157,7 @@ export class GcComponent implements OnInit, OnDestroy {
            schedule: {
                parameters: {
                    delete_untagged: this.shouldDeleteUntagged,
+                   delete_tag: this.shouldDeleteTag,
                    workers: +this.workerNum,
                    dry_run: false,
                },

@@ -177,6 +184,7 @@ export class GcComponent implements OnInit, OnDestroy {
            schedule: {
                parameters: {
                    delete_untagged: this.shouldDeleteUntagged,
+                   delete_tag: this.shouldDeleteTag,
                    workers: +this.workerNum,
                    dry_run: true,
                },

@@ -209,6 +217,7 @@ export class GcComponent implements OnInit, OnDestroy {
            schedule: {
                parameters: {
                    delete_untagged: this.shouldDeleteUntagged,
+                   delete_tag: this.shouldDeleteTag,
                    workers: +this.workerNum,
                    dry_run: false,
                },

@@ -234,6 +243,7 @@ export class GcComponent implements OnInit, OnDestroy {
            schedule: {
                parameters: {
                    delete_untagged: this.shouldDeleteUntagged,
+                   delete_tag: this.shouldDeleteTag,
                    workers: +this.workerNum,
                    dry_run: false,
                },
@@ -1348,6 +1348,7 @@
        "MSG_SCHEDULE_RESET": "Speicherbereinigungs-Intervall wurde zurückgesetzt",
        "PARAMETERS": "Parameter",
        "DELETE_UNTAGGED": "Erlaube Speicherbereinigung auf Artefakte ohne Tag",
+       "DELETE_TAG": "Allow garbage collection to remove tag files from backend storage",
        "EXPLAIN": "Speicherbereinigung (Garbage Collection / GC) ist eine rechenintensive Operation, die die Registry-Leistung beeinflussen kann",
        "EXPLAIN_TIME_WINDOW": "Artifacts uploaded in the past 2 hours(the default window) are excluded from garbage collection",
        "DRY_RUN_SUCCESS": "Probelauf erfolgreich gestartet",

@@ -1351,6 +1351,7 @@
        "MSG_SCHEDULE_RESET": "Garbage Collection schedule has been reset",
        "PARAMETERS": "Parameters",
        "DELETE_UNTAGGED": "Allow garbage collection on untagged artifacts",
+       "DELETE_TAG": "Allow garbage collection to remove tag files from backend storage",
        "EXPLAIN": "GC is a compute intensive operation that may impact registry performance",
        "EXPLAIN_TIME_WINDOW": "Artifacts uploaded in the past 2 hours(the default window) are excluded from garbage collection",
        "DRY_RUN_SUCCESS": "Triggered dry run successfully",

@@ -1345,6 +1345,7 @@
        "MSG_SCHEDULE_RESET": "Programación de Garbage Collection ha sido reiniciada",
        "PARAMETERS": "Parametros",
        "DELETE_UNTAGGED": "Permitir garbage collection en artefactos no tageados",
+       "DELETE_TAG": "Allow garbage collection to remove tag files from backend storage",
        "EXPLAIN": "GC es una operación que requiere un uso intensivo de recursos informáticos y que puede afectar el rendimiento del registro",
        "EXPLAIN_TIME_WINDOW": "Los artefactos cargados en las últimas 2 horas (la ventana predeterminada) se excluyen de la recolección de basura",
        "DRY_RUN_SUCCESS": "Activación de dry run satisfactorio",

@@ -1350,6 +1350,7 @@
        "MSG_SCHEDULE_RESET": "La planification de la purge a été réinitialisée",
        "PARAMETERS": "Paramètres",
        "DELETE_UNTAGGED": "Supprimer les artefacts non tagués",
+       "DELETE_TAG": "Allow garbage collection to remove tag files from backend storage",
        "EXPLAIN": "Purger est une opération gourmande en puissance de calcul qui peut impacter les performances du registre",
        "EXPLAIN_TIME_WINDOW": "Les artefacts téléversés dans les dernières 2 heures (fenêtre de temps par défaut) sont exclues de la purge",
        "DRY_RUN_SUCCESS": "Exécution à blanc déclenchée avec succès",

@@ -1343,6 +1343,7 @@
        "MSG_SCHEDULE_RESET": "가비지 컬렉션 일정이 초기화됐습니다",
        "PARAMETERS": "파라미터",
        "DELETE_UNTAGGED": "태그가 지정되지 않은 아티팩트에 대한 가비지 수집 허용",
+       "DELETE_TAG": "Allow garbage collection to remove tag files from backend storage",
        "EXPLAIN": "가비지 컬렉션은 레지스트리 성능에 영향을 미칠 수 있는 커퓨팅 집약적 작업입니다.",
        "EXPLAIN_TIME_WINDOW": "지난 2시간(기본 기간) 동안 업로드된 아티팩트는 가비지 컬렉션에서 제외됩니다.",
        "DRY_RUN_SUCCESS": "모의 테스트가 성공적으로 실행됐습니다",

@@ -1345,6 +1345,7 @@
        "MSG_SCHEDULE_RESET": "Agendamento da limpeza redefinido",
        "PARAMETERS": "Parâmetros",
        "DELETE_UNTAGGED": "Permitir coleta de artefatos sem tags",
+       "DELETE_TAG": "Allow garbage collection to remove tag files from backend storage",
        "EXPLAIN": "A limpeza exige recursos computacionais e pode impactar performance.",
        "EXPLAIN_TIME_WINDOW": "Artifacts uploaded in the past 2 hours(the default window) are excluded from garbage collection",
        "DRY_RUN_SUCCESS": "Teste executado com sucesso",

@@ -1267,6 +1267,7 @@
        "MSG_SCHEDULE_RESET": "Расписание для сборки мусора сброшено",
        "PARAMETERS": "Параметры",
        "DELETE_UNTAGGED": "Разрешить сборку мусора для непомеченных артефактов",
+       "DELETE_TAG": "Allow garbage collection to remove tag files from backend storage",
        "EXPLAIN": "GC — это ресурсоемкая операция, которая может повлиять на производительность реестра",
        "DRY_RUN_SUCCESS": "Сухой запуск успешно запущен"
    },

@@ -1349,6 +1349,7 @@
        "MSG_SCHEDULE_RESET": "Çöp Toplama programı sıfırlandı",
        "PARAMETERS": "Parameters",
        "DELETE_UNTAGGED": "Allow garbage collection on untagged artifacts",
+       "DELETE_TAG": "Allow garbage collection to remove tag files from backend storage",
        "EXPLAIN": "GC is a compute intensive operation that may impact registry performance",
        "EXPLAIN_TIME_WINDOW": "Artifacts uploaded in the past 2 hours(the default window) are excluded from garbage collection",
        "DRY_RUN_SUCCESS": "Triggered dry run successfully",

@@ -1347,6 +1347,7 @@
        "MSG_SCHEDULE_RESET": "垃圾回收定时任务已被重置",
        "PARAMETERS": "参数",
        "DELETE_UNTAGGED": "允许回收无 tag 的 artifacts",
+       "DELETE_TAG": "允许垃圾回收移除后端存储中的Tag文件",
        "EXPLAIN": "垃圾回收是一个计算密集型操作,可能会影响仓库性能",
        "EXPLAIN_TIME_WINDOW": "在最近的两小时(默认窗口期)内被推送的 Artifacts 不会被当做垃圾回收的目标",
        "DRY_RUN_SUCCESS": "触发模拟运行成功",

@@ -1343,6 +1343,7 @@
        "MSG_SCHEDULE_RESET": "已重設垃圾回收排程",
        "PARAMETERS": "參數",
        "DELETE_UNTAGGED": "允許對未標籤的檔案進行垃圾回收",
+       "DELETE_TAG": "允許垃圾回收移除後端儲存中的標籤檔案",
        "EXPLAIN": "垃圾回收是一個密集計算的操作,可能影響 Registry 的效能",
        "EXPLAIN_TIME_WINDOW": "過去 2 小時(預設窗口)內上傳的檔案將不包括在垃圾回收中",
        "DRY_RUN_SUCCESS": "成功觸發模擬執行",
@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"os"
 	"strings"
 	"time"
 
@@ -33,18 +34,21 @@ import (
 	httpLib "github.com/goharbor/harbor/src/lib/http"
 	"github.com/goharbor/harbor/src/lib/log"
 	"github.com/goharbor/harbor/src/lib/orm"
+	"github.com/goharbor/harbor/src/lib/redis"
 	proModels "github.com/goharbor/harbor/src/pkg/project/models"
+	"github.com/goharbor/harbor/src/pkg/proxy/connection"
 	"github.com/goharbor/harbor/src/pkg/reg/model"
 	"github.com/goharbor/harbor/src/server/middleware"
 )
 
 const (
 	contentLength       = "Content-Length"
 	contentType         = "Content-Type"
 	dockerContentDigest = "Docker-Content-Digest"
 	etag                = "Etag"
 	ensureTagInterval   = 10 * time.Second
 	ensureTagMaxRetry   = 60
+	upstreamRegistryLimitOnProject = "UPSTREAM_REGISTRY_LIMIT_ON_PROJECT" // if UPSTREAM_REGISTRY_LIMIT_ON_PROJECT is true, the upstream registry connection is based on project level, by default it is artifact level
 )
 
 var tooManyRequestsError = errors.New("too many requests to upstream registry").WithCode(errors.RateLimitCode)
@@ -99,6 +103,22 @@ func handleBlob(w http.ResponseWriter, r *http.Request, next http.Handler) error
 		next.ServeHTTP(w, r)
 		return nil
 	}
+
+	if p.MaxUpstreamConnection() > 0 {
+		client, err := redis.GetHarborClient()
+		if err != nil {
+			return errors.NewErrs(err)
+		}
+		key := upstreamRegistryConnectionKey(art)
+		log.Debugf("handle blob, upstream registry connection limit key: %s", key)
+		if !connection.Limiter.Acquire(ctx, client, key, p.MaxUpstreamConnection()) {
+			log.Infof("current connection exceed max connections to upstream registry")
+			// send http code 429 to client
+			return tooManyRequestsError
+		}
+		defer connection.Limiter.Release(context.Background(), client, key) // use background context in defer to avoid being canceled
+	}
+
 	size, reader, err := proxyCtl.ProxyBlob(ctx, p, art)
 	if err != nil {
 		return err
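Note that the deferred Release above runs with context.Background() rather than the request context: the request context is canceled as soon as the handler returns, and a canceled context would make the Redis DECR fail, leaving the counter inflated until the key's one-hour TTL expires. A self-contained sketch of the pattern (acquire and release are stand-ins for the limiter calls):

// Sketch: why a deferred cleanup should not reuse the request context.
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// stand-ins for connection.Limiter.Acquire / Release
func acquire(ctx context.Context) bool { return true }
func release(ctx context.Context)      { fmt.Println("release err:", ctx.Err()) }

func handler(w http.ResponseWriter, r *http.Request) {
	if !acquire(r.Context()) {
		http.Error(w, "too many requests", http.StatusTooManyRequests) // 429, like tooManyRequestsError
		return
	}
	// context.Background() here: r.Context() is canceled once the handler
	// returns, so a release deferred with it could fail and leak the slot.
	defer release(context.Background())
	// ... proxy the upstream content ...
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/v2/demo/blobs/sha256:abc", nil)
	handler(httptest.NewRecorder(), req)
	// prints: release err: <nil>
}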
@@ -173,6 +193,15 @@ func defaultBlobURL(projectName string, name string, digest string) string {
 	return fmt.Sprintf("/v2/%s/library/%s/blobs/%s", projectName, name, digest)
 }
 
+// upstreamRegistryConnectionKey gets the upstream registry connection key
+func upstreamRegistryConnectionKey(art lib.ArtifactInfo) string {
+	limitOnProject := os.Getenv(upstreamRegistryLimitOnProject)
+	if strings.EqualFold("true", limitOnProject) {
+		return fmt.Sprintf("{upstream_registry_connection}:%s", art.ProjectName)
+	}
+	return fmt.Sprintf("{upstream_registry_connection}:%s:%s", art.Repository, art.Digest)
+}
+
 func handleManifest(w http.ResponseWriter, r *http.Request, next http.Handler) error {
 	ctx := r.Context()
 	art, p, proxyCtl, err := preCheck(ctx, true)
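The braces in the key are a Redis Cluster hash tag: only the braced substring is hashed when a slot is chosen, so every limiter key maps to the same slot. A tiny sketch of the two key shapes the function can produce (sample values only):

// Sketch: the two key shapes produced by upstreamRegistryConnectionKey.
package main

import "fmt"

func main() {
	project, repo, digest := "cache", "cache/library/nginx", "sha256:abc" // sample values
	// default: one counter per artifact
	fmt.Printf("{upstream_registry_connection}:%s:%s\n", repo, digest)
	// with UPSTREAM_REGISTRY_LIMIT_ON_PROJECT=true: one counter per project
	fmt.Printf("{upstream_registry_connection}:%s\n", project)
}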
@@ -219,6 +248,20 @@ func handleManifest(w http.ResponseWriter, r *http.Request, next http.Handler) e
 		next.ServeHTTP(w, r)
 		return nil
 	}
+	if p.MaxUpstreamConnection() > 0 {
+		client, err := redis.GetHarborClient()
+		if err != nil {
+			return errors.NewErrs(err)
+		}
+		key := upstreamRegistryConnectionKey(art)
+		log.Debugf("handle manifest key %v", key)
+		if !connection.Limiter.Acquire(ctx, client, key, p.MaxUpstreamConnection()) {
+			log.Infof("current connection exceed max connections to upstream registry")
+			// send http code 429 to client
+			return tooManyRequestsError
+		}
+		defer connection.Limiter.Release(context.Background(), client, key) // use background context in defer to avoid being canceled
+	}
 
 	log.Debugf("the tag is %v, digest is %v", art.Tag, art.Digest)
 	if r.Method == http.MethodHead {
@@ -100,6 +100,9 @@ func (g *gcAPI) kick(ctx context.Context, scheType string, cron string, paramete
 	if deleteUntagged, ok := parameters["delete_untagged"].(bool); ok {
 		policy.DeleteUntagged = deleteUntagged
 	}
+	if deleteTag, ok := parameters["delete_tag"].(bool); ok {
+		policy.DeleteTag = deleteTag
+	}
 	if workers, ok := parameters["workers"].(json.Number); ok {
 		wInt, err := workers.Int64()
 		if err != nil {

@@ -124,6 +127,9 @@ func (g *gcAPI) kick(ctx context.Context, scheType string, cron string, paramete
 	if deleteUntagged, ok := parameters["delete_untagged"].(bool); ok {
 		policy.DeleteUntagged = deleteUntagged
 	}
+	if deleteTag, ok := parameters["delete_tag"].(bool); ok {
+		policy.DeleteTag = deleteTag
+	}
 	if workers, ok := parameters["workers"].(json.Number); ok {
 		wInt, err := workers.Int64()
 		if err != nil {
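For reference, a sketch of the kind of request body kick() parses when GC is triggered through the API with the new flag. The endpoint shape follows Harbor's v2.0 GC API; the host and credentials are placeholders.

// Sketch: trigger a manual GC run that also deletes tag files.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	body := []byte(`{
		"schedule": {"type": "Manual"},
		"parameters": {
			"delete_untagged": true,
			"delete_tag": true,
			"workers": 2,
			"dry_run": false
		}
	}`)
	req, err := http.NewRequest(http.MethodPost,
		"https://harbor.example.com/api/v2.0/system/gc/schedule", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth("admin", "password") // placeholder credentials
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}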
@@ -163,9 +163,10 @@ func (a *projectAPI) CreateProject(ctx context.Context, params operation.CreateP
 		}
 	}
 
-	// ignore metadata.proxy_speed_kb for non-proxy-cache project
+	// ignore metadata.proxy_speed_kb and metadata.max_upstream_conn for non-proxy-cache project
 	if req.RegistryID == nil {
 		req.Metadata.ProxySpeedKb = nil
+		req.Metadata.MaxUpstreamConn = nil
 	}
 
 	// ignore enable_content_trust metadata for proxy cache project

@@ -566,9 +567,10 @@ func (a *projectAPI) UpdateProject(ctx context.Context, params operation.UpdateP
 		}
 	}
 
-	// ignore metadata.proxy_speed_kb for non-proxy-cache project
+	// ignore metadata.proxy_speed_kb and metadata.max_upstream_conn for non-proxy-cache project
 	if params.Project.Metadata != nil && !p.IsProxy() {
 		params.Project.Metadata.ProxySpeedKb = nil
+		params.Project.Metadata.MaxUpstreamConn = nil
 	}
 
 	// ignore enable_content_trust metadata for proxy cache project

@@ -818,6 +820,12 @@ func (a *projectAPI) validateProjectReq(ctx context.Context, req *models.Project
 			return errors.BadRequestError(nil).WithMessagef("metadata.proxy_speed_kb should by an int32, but got: '%s', err: %s", *ps, err)
 		}
 	}
+
+	if cnt := req.Metadata.MaxUpstreamConn; cnt != nil {
+		if _, err := strconv.ParseInt(*cnt, 10, 32); err != nil {
+			return errors.BadRequestError(nil).WithMessagef("metadata.max_upstream_conn should be an int, but got '%s', err: %s", *cnt, err)
+		}
+	}
 	}
 
 	if req.StorageLimit != nil {
@@ -161,6 +161,12 @@ func (p *projectMetadataAPI) validate(metas map[string]string) (map[string]strin
 			return nil, errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("invalid value: %s", value)
 		}
 		metas[proModels.ProMetaProxySpeed] = strconv.FormatInt(v, 10)
+	case proModels.ProMetaMaxUpstreamConn:
+		v, err := strconv.ParseInt(value, 10, 32)
+		if err != nil {
+			return nil, errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("invalid value: %s", value)
+		}
+		metas[proModels.ProMetaMaxUpstreamConn] = strconv.FormatInt(v, 10)
 	default:
 		return nil, errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("invalid key: %s", key)
 	}
@@ -0,0 +1,76 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package handler
+
+import (
+	"testing"
+
+	proModels "github.com/goharbor/harbor/src/pkg/project/models"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestValidate(t *testing.T) {
+	api := &projectMetadataAPI{}
+
+	tests := []struct {
+		name      string
+		metas     map[string]string
+		expectErr bool
+	}{
+		{
+			name:      "Invalid max upstream conn value",
+			metas:     map[string]string{proModels.ProMetaMaxUpstreamConn: "invalid"},
+			expectErr: true,
+		},
+		{
+			name:      "max upstream conn value 0",
+			metas:     map[string]string{proModels.ProMetaMaxUpstreamConn: "0"},
+			expectErr: false,
+		},
+		{
+			name:      "max upstream conn value -1",
+			metas:     map[string]string{proModels.ProMetaMaxUpstreamConn: "-1"},
+			expectErr: false,
+		},
+		{
+			name:      "normal max upstream conn value",
+			metas:     map[string]string{proModels.ProMetaMaxUpstreamConn: "30"},
+			expectErr: false,
+		},
+		{
+			name:      "Unsupported key",
+			metas:     map[string]string{"unsupported_key": "value"},
+			expectErr: true,
+		},
+		{
+			name:      "Empty map",
+			metas:     map[string]string{},
+			expectErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := api.validate(tt.metas)
+			if tt.expectErr {
+				assert.Error(t, err)
+				assert.Nil(t, result)
+			} else {
+				assert.NoError(t, err)
+				assert.NotNil(t, result)
+			}
+		})
+	}
+}
@@ -31,10 +31,8 @@ import (
 	"github.com/goharbor/harbor/src/common/utils"
 	"github.com/goharbor/harbor/src/controller/robot"
 	"github.com/goharbor/harbor/src/lib"
-	"github.com/goharbor/harbor/src/lib/config"
 	"github.com/goharbor/harbor/src/lib/errors"
 	"github.com/goharbor/harbor/src/lib/log"
-	"github.com/goharbor/harbor/src/lib/q"
 	"github.com/goharbor/harbor/src/pkg/permission/types"
 	pkg "github.com/goharbor/harbor/src/pkg/robot/model"
 	"github.com/goharbor/harbor/src/server/v2.0/handler/model"

@@ -87,6 +85,12 @@ func (rAPI *robotAPI) CreateRobot(ctx context.Context, params operation.CreateRo
 	case *local.SecurityContext:
 		creatorRef = int64(s.User().UserID)
 	case *robotSc.SecurityContext:
+		if s.User() == nil {
+			return rAPI.SendError(ctx, errors.New(nil).WithMessage("invalid security context: empty robot account"))
+		}
+		if !isValidPermissionScope(params.Robot.Permissions, s.User().Permissions) {
+			return rAPI.SendError(ctx, errors.New(nil).WithMessagef("permission scope is invalid. It must be equal to or more restrictive than the creator robot's permissions: %s", s.User().Name).WithCode(errors.DENIED))
+		}
 		creatorRef = s.User().ID
 	default:
 		return rAPI.SendError(ctx, errors.New(nil).WithMessage("invalid security context"))

@@ -102,25 +106,6 @@ func (rAPI *robotAPI) CreateRobot(ctx context.Context, params operation.CreateRo
 		return rAPI.SendError(ctx, err)
 	}
 
-	if _, ok := sc.(*robotSc.SecurityContext); ok {
-		creatorRobots, err := rAPI.robotCtl.List(ctx, q.New(q.KeyWords{
-			"name":       strings.TrimPrefix(sc.GetUsername(), config.RobotPrefix(ctx)),
-			"project_id": r.ProjectID,
-		}), &robot.Option{
-			WithPermission: true,
-		})
-		if err != nil {
-			return rAPI.SendError(ctx, err)
-		}
-		if len(creatorRobots) == 0 {
-			return rAPI.SendError(ctx, errors.DeniedError(nil))
-		}
-
-		if !isValidPermissionScope(params.Robot.Permissions, creatorRobots[0].Permissions) {
-			return rAPI.SendError(ctx, errors.New(nil).WithMessagef("permission scope is invalid. It must be equal to or more restrictive than the creator robot's permissions: %s", creatorRobots[0].Name).WithCode(errors.DENIED))
-		}
-	}
-
 	rid, pwd, err := rAPI.robotCtl.Create(ctx, r)
 	if err != nil {
 		return rAPI.SendError(ctx, err)