mirror of https://github.com/kubevela/kubevela.git
Feat: support shared resource (#4213)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
This commit is contained in:
parent 4c90e90fff
commit 9c7d3f408d
@@ -18,6 +18,7 @@ package v1alpha1
import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/utils/strings/slices"

    "github.com/oam-dev/kubevela/pkg/oam"
)
@@ -66,6 +67,29 @@ type ResourcePolicyRuleSelector struct {
    OAMResourceTypes []string `json:"oamTypes"`
    TraitTypes       []string `json:"traitTypes"`
    ResourceTypes    []string `json:"resourceTypes"`
    ResourceNames    []string `json:"resourceNames"`
}

// Match checks whether the current rule selector matches the target resource
func (in *ResourcePolicyRuleSelector) Match(manifest *unstructured.Unstructured) bool {
    var compName, compType, oamType, traitType, resourceType, resourceName string
    if labels := manifest.GetLabels(); labels != nil {
        compName = labels[oam.LabelAppComponent]
        compType = labels[oam.WorkloadTypeLabel]
        oamType = labels[oam.LabelOAMResourceType]
        traitType = labels[oam.TraitTypeLabel]
    }
    resourceType = manifest.GetKind()
    resourceName = manifest.GetName()
    match := func(src []string, val string) (found bool) {
        return val != "" && slices.Contains(src, val)
    }
    return match(in.CompNames, compName) ||
        match(in.CompTypes, compType) ||
        match(in.OAMResourceTypes, oamType) ||
        match(in.TraitTypes, traitType) ||
        match(in.ResourceTypes, resourceType) ||
        match(in.ResourceNames, resourceName)
}

// GarbageCollectStrategy is the strategy for recycling the target resource
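For orientation, the selector ORs all of its fields, so a single matching field is enough to mark a manifest as matched. A minimal usage sketch (mirroring the unit test added later in this commit; the import path is the one used elsewhere in this diff):

```go
package main

import (
    "fmt"

    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
    // A selector that matches all ConfigMap and Namespace resources.
    sel := v1alpha1.ResourcePolicyRuleSelector{ResourceTypes: []string{"ConfigMap", "Namespace"}}
    // An unstructured Namespace manifest, as seen during dispatch.
    ns := &unstructured.Unstructured{Object: map[string]interface{}{"kind": "Namespace"}}
    fmt.Println(sel.Match(ns)) // true: the manifest kind hits resourceTypes
}
```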
@@ -84,23 +108,7 @@ const (
// FindStrategy finds the gc strategy for the target resource
func (in GarbageCollectPolicySpec) FindStrategy(manifest *unstructured.Unstructured) *GarbageCollectStrategy {
    for _, rule := range in.Rules {
        var compName, compType, oamType, traitType string
        if labels := manifest.GetLabels(); labels != nil {
            compName = labels[oam.LabelAppComponent]
            compType = labels[oam.WorkloadTypeLabel]
            oamType = labels[oam.LabelOAMResourceType]
            traitType = labels[oam.TraitTypeLabel]
        }
        match := func(src []string, val string) (found bool) {
            for _, _val := range src {
                found = found || _val == val
            }
            return val != "" && found
        }
        if match(rule.Selector.CompNames, compName) ||
            match(rule.Selector.CompTypes, compType) ||
            match(rule.Selector.OAMResourceTypes, oamType) ||
            match(rule.Selector.TraitTypes, traitType) {
        if rule.Selector.Match(manifest) {
            return &rule.Strategy
        }
    }
@@ -16,6 +16,8 @@ limitations under the License.
package v1alpha1

import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

const (
    // TopologyPolicyType refers to the type of topology policy
    TopologyPolicyType = "topology"

@@ -23,6 +25,8 @@ const (
    OverridePolicyType = "override"
    // DebugPolicyType refers to the type of debug policy
    DebugPolicyType = "debug"
    // SharedResourcePolicyType refers to the type of shared resource policy
    SharedResourcePolicyType = "shared-resource"
)

// TopologyPolicySpec defines the spec of topology policy
@@ -53,3 +57,23 @@ type OverridePolicySpec struct {
    Components []EnvComponentPatch `json:"components,omitempty"`
    Selector   []string            `json:"selector,omitempty"`
}

// SharedResourcePolicySpec defines the spec of shared-resource policy
type SharedResourcePolicySpec struct {
    Rules []SharedResourcePolicyRule `json:"rules"`
}

// SharedResourcePolicyRule defines the rule for sharing resources
type SharedResourcePolicyRule struct {
    Selector ResourcePolicyRuleSelector `json:"selector"`
}

// FindStrategy returns whether the target resource should be shared
func (in SharedResourcePolicySpec) FindStrategy(manifest *unstructured.Unstructured) bool {
    for _, rule := range in.Rules {
        if rule.Selector.Match(manifest) {
            return true
        }
    }
    return false
}
@@ -0,0 +1,69 @@
/*
Copyright 2022 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "testing"

    "github.com/stretchr/testify/require"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func TestSharedResourcePolicySpec_FindStrategy(t *testing.T) {
    testCases := map[string]struct {
        rules   []SharedResourcePolicyRule
        input   *unstructured.Unstructured
        matched bool
    }{
        "shared resource rule resourceName match": {
            rules: []SharedResourcePolicyRule{{
                Selector: ResourcePolicyRuleSelector{ResourceNames: []string{"example"}},
            }},
            input: &unstructured.Unstructured{Object: map[string]interface{}{
                "metadata": map[string]interface{}{
                    "name": "example",
                },
            }},
            matched: true,
        },
        "shared resource rule resourceType match": {
            rules: []SharedResourcePolicyRule{{
                Selector: ResourcePolicyRuleSelector{ResourceTypes: []string{"ConfigMap", "Namespace"}},
            }},
            input: &unstructured.Unstructured{Object: map[string]interface{}{
                "kind": "Namespace",
            }},
            matched: true,
        },
        "shared resource rule mismatch": {
            rules: []SharedResourcePolicyRule{{
                Selector: ResourcePolicyRuleSelector{ResourceNames: []string{"mismatch"}},
            }},
            input: &unstructured.Unstructured{Object: map[string]interface{}{
                "kind": "Namespace",
            }},
            matched: false,
        },
    }
    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            r := require.New(t)
            spec := SharedResourcePolicySpec{Rules: tc.rules}
            r.Equal(tc.matched, spec.FindStrategy(tc.input))
        })
    }
}
@@ -635,6 +635,11 @@ func (in *ResourcePolicyRuleSelector) DeepCopyInto(out *ResourcePolicyRuleSelect
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.ResourceNames != nil {
        in, out := &in.ResourceNames, &out.ResourceNames
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRuleSelector.
@@ -647,6 +652,44 @@ func (in *ResourcePolicyRuleSelector) DeepCopy() *ResourcePolicyRuleSelector {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SharedResourcePolicyRule) DeepCopyInto(out *SharedResourcePolicyRule) {
    *out = *in
    in.Selector.DeepCopyInto(&out.Selector)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedResourcePolicyRule.
func (in *SharedResourcePolicyRule) DeepCopy() *SharedResourcePolicyRule {
    if in == nil {
        return nil
    }
    out := new(SharedResourcePolicyRule)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SharedResourcePolicySpec) DeepCopyInto(out *SharedResourcePolicySpec) {
    *out = *in
    if in.Rules != nil {
        in, out := &in.Rules, &out.Rules
        *out = make([]SharedResourcePolicyRule, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedResourcePolicySpec.
func (in *SharedResourcePolicySpec) DeepCopy() *SharedResourcePolicySpec {
    if in == nil {
        return nil
    }
    out := new(SharedResourcePolicySpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopologyPolicySpec) DeepCopyInto(out *TopologyPolicySpec) {
    *out = *in
Binary files not shown (two new figures added: shared-resource-1.jpg, 154 KiB; shared-resource-2.jpg, 113 KiB).
@@ -0,0 +1,47 @@
# Shared Resource

### Background

In KubeVela, by default, an application **owns** its resources.
This means that resources created by an application should only be controlled by the application that created them.

So there are basically two requirements for an application creating resources:
1. The resource must not exist before the application creates it. If it already exists, there will be a resource conflict error.
2. The resource is expected to be manageable only through its creator. "Others" should not be able to modify or edit it.

While dispatching resources, the application will
1. Check whether the resource exists. If it exists, check its labels.
   If "app.oam.dev/name" and "app.oam.dev/namespace" equal the application's name and namespace, the resource was previously created by the same application, and the dispatch now becomes an update operation.
   These two labels identify the owner of the resource.
2. If the resource exists but no labels are found, the resource was created before this application. In this case, the application will report a resource conflict error.
3. If the resource exists and the labels point to another application, the resource is managed by that other application. In this case, the current application will also report a resource conflict error.

With these checks, different applications cannot manage the same resource, as the sketch below illustrates.
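A minimal sketch of this dispatch-time check, assuming an illustrative helper name (`checkResourceConflict` is not the actual function in the codebase) and using the owner label keys named above:

```go
package sketch

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// checkResourceConflict sketches the ownership check performed while
// dispatching; it is illustrative, not the exact function in the codebase.
func checkResourceConflict(existing *unstructured.Unstructured, appName, appNs string) error {
    if existing == nil {
        return nil // resource does not exist yet: plain create is fine
    }
    labels := existing.GetLabels()
    if labels == nil || labels["app.oam.dev/name"] == "" {
        // resource predates any application: conflict (case 2)
        return fmt.Errorf("resource %s exists but is not managed by any application", existing.GetName())
    }
    if labels["app.oam.dev/name"] != appName || labels["app.oam.dev/namespace"] != appNs {
        // resource is owned by another application: conflict (case 3)
        return fmt.Errorf("resource %s is managed by %s/%s", existing.GetName(),
            labels["app.oam.dev/namespace"], labels["app.oam.dev/name"])
    }
    return nil // same owner: the dispatch becomes an update (case 1)
}
```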
### Usage

However, there are scenarios where these two requirements are not met. One of them is sharing resources across different applications.
For example, each application wants to create a ConfigMap, and their ConfigMaps are identical.

To achieve that, a KubeVela application can use the `shared-resource` policy.

#### create

When a resource is created as a shared resource, a special annotation `app.oam.dev/shared-by` is added to it.
The annotation records the "sharers" of the resource in time order. The application that first creates the resource sets the resource's owner labels to itself.
Then it adds itself to the sharer annotation.

#### share

When another application wants to share the resource, it checks whether the resource is sharable, i.e. whether there is at least one sharer in the sharer annotation.
If the resource is sharable, the application adds itself to the sharer annotation but does not modify the content of the resource.

#### delete

With this mechanism, only the owner of the resource can modify it (including updates and state-keeping). Other sharers can only read the resource.
When the owner of the resource is gone (the application is deleted or no longer uses the resource), ownership is handed to the next sharer. If no sharer is left, the resource is finally deleted. A sketch of this bookkeeping follows.
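A hedged sketch of the sharer bookkeeping, assuming (consistently with the tests in this commit) that the annotation value is a comma-separated list of `namespace/name` keys in time order; the lower-case helpers stand in for the `AddSharer`/`RemoveSharer`/`FirstSharer` helpers in `pkg/utils/apply` and may differ from the real implementations:

```go
package sketch

import "strings"

// addSharer appends appKey to the comma-separated sharer list
// if it is not recorded yet, preserving time order.
func addSharer(sharedBy, appKey string) string {
    for _, s := range strings.Split(sharedBy, ",") {
        if s == appKey {
            return sharedBy
        }
    }
    if sharedBy == "" {
        return appKey
    }
    return sharedBy + "," + appKey
}

// removeSharer drops appKey from the list, keeping the original order.
func removeSharer(sharedBy, appKey string) string {
    var kept []string
    for _, s := range strings.Split(sharedBy, ",") {
        if s != "" && s != appKey {
            kept = append(kept, s)
        }
    }
    return strings.Join(kept, ",")
}

// firstSharer returns the current owner: the earliest remaining sharer.
func firstSharer(sharedBy string) string {
    return strings.Split(sharedBy, ",")[0]
}
```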
See the following figures for details.


@@ -0,0 +1,91 @@
## How to share resources across applications

Sometimes, you may want different applications to share the same resource.
For example, you might have various applications that need the same namespace to exist.
In this case, you can use the `shared-resource` policy to declare which resources should be shared.

### Usage
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: app1
spec:
  components:
    - name: ns1
      type: k8s-objects
      properties:
        objects:
          - apiVersion: v1
            kind: Namespace
            metadata:
              name: example
    - name: cm1
      type: k8s-objects
      properties:
        objects:
          - apiVersion: v1
            kind: ConfigMap
            metadata:
              name: cm1
              namespace: example
            data:
              key: value1
  policies:
    - name: shared-resource
      type: shared-resource
      properties:
        rules:
          - selector:
              resourceTypes: ["Namespace"]
```
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: app2
spec:
  components:
    - name: ns2
      type: k8s-objects
      properties:
        objects:
          - apiVersion: v1
            kind: Namespace
            metadata:
              name: example
    - name: cm2
      type: k8s-objects
      properties:
        objects:
          - apiVersion: v1
            kind: ConfigMap
            metadata:
              name: cm2
              namespace: example
            data:
              key: value2
  policies:
    - name: shared-resource
      type: shared-resource
      properties:
        rules:
          - selector:
              resourceTypes: ["Namespace"]
```
The above two applications will dispatch the same namespace "example".
They will respectively create two different ConfigMaps inside the namespace "example".

Both applications use the `shared-resource` policy and declare the namespace resource as shared.
In this way, there is no conflict when both create the same namespace.
If the `shared-resource` policy were not used, the second application would report an error after finding that the namespace "example" is managed by the first application.

The namespace will only be recycled when both applications are removed.
### Working Detail

One problem with sharing a resource is what happens when different applications hold different configurations for it.
With the `shared-resource` policy, all sharers are recorded in time order. The first sharer can write the resource, while the other sharers can only read it. For example, after both applications above are deployed, the namespace's `app.oam.dev/shared-by` annotation would read `default/app1,default/app2` (assuming both applications live in the `default` namespace). After the first sharer is deleted, control of the resource is handed to the next sharer. If no sharer remains, the resource is finally removed.
@@ -368,6 +368,7 @@ func (p *Parser) parsePoliciesFromRevision(ctx context.Context, af *Appfile) (er
    switch policy.Type {
    case v1alpha1.GarbageCollectPolicyType:
    case v1alpha1.ApplyOncePolicyType:
    case v1alpha1.SharedResourcePolicyType:
    case v1alpha1.EnvBindingPolicyType:
    case v1alpha1.TopologyPolicyType:
    case v1alpha1.OverridePolicyType:
@@ -393,6 +394,7 @@ func (p *Parser) parsePolicies(ctx context.Context, af *Appfile) (err error) {
    switch policy.Type {
    case v1alpha1.GarbageCollectPolicyType:
    case v1alpha1.ApplyOncePolicyType:
    case v1alpha1.SharedResourcePolicyType:
    case v1alpha1.EnvBindingPolicyType:
    case v1alpha1.TopologyPolicyType:
    case v1alpha1.DebugPolicyType:
@@ -210,4 +210,7 @@ const (

    // AnnotationApplicationGroup indicates the group of the Application to use to apply resources
    AnnotationApplicationGroup = "app.oam.dev/group"

    // AnnotationAppSharedBy records the applications sharing the resource
    AnnotationAppSharedBy = "app.oam.dev/shared-by"
)
@@ -55,3 +55,12 @@ func ParseApplyOncePolicy(app *v1beta1.Application) (*v1alpha1.ApplyOncePolicySp
    }
    return nil, nil
}

// ParseSharedResourcePolicy parses the shared-resource policy
func ParseSharedResourcePolicy(app *v1beta1.Application) (*v1alpha1.SharedResourcePolicySpec, error) {
    spec := &v1alpha1.SharedResourcePolicySpec{}
    if exists, err := parsePolicy(app, v1alpha1.SharedResourcePolicyType, spec); exists {
        return spec, err
    }
    return nil, nil
}
@@ -81,3 +81,29 @@ func TestParseApplyOncePolicy(t *testing.T) {
    r.NoError(err)
    r.Equal(policySpec, spec)
}

func TestParseSharedResourcePolicy(t *testing.T) {
    r := require.New(t)
    app := &v1beta1.Application{Spec: v1beta1.ApplicationSpec{
        Policies: []v1beta1.AppPolicy{{Type: "example"}},
    }}
    spec, err := ParseSharedResourcePolicy(app)
    r.NoError(err)
    r.Nil(spec)
    app.Spec.Policies = append(app.Spec.Policies, v1beta1.AppPolicy{
        Type:       "shared-resource",
        Properties: &runtime.RawExtension{Raw: []byte("bad value")},
    })
    _, err = ParseSharedResourcePolicy(app)
    r.Error(err)
    policySpec := &v1alpha1.SharedResourcePolicySpec{
        Rules: []v1alpha1.SharedResourcePolicyRule{{
            Selector: v1alpha1.ResourcePolicyRuleSelector{ResourceNames: []string{"example"}},
        }}}
    bs, err := json.Marshal(policySpec)
    r.NoError(err)
    app.Spec.Policies[1].Properties.Raw = bs
    spec, err = ParseSharedResourcePolicy(app)
    r.NoError(err)
    r.Equal(policySpec, spec)
}
@@ -27,6 +27,8 @@ import (

    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
    "github.com/oam-dev/kubevela/pkg/multicluster"
    "github.com/oam-dev/kubevela/pkg/oam"
    "github.com/oam-dev/kubevela/pkg/utils/apply"
)

type resourceCacheEntry struct {
@@ -41,12 +43,14 @@ type resourceCacheEntry struct {
}

type resourceCache struct {
    app *v1beta1.Application
    cli client.Client
    m   map[string]*resourceCacheEntry
}

func newResourceCache(cli client.Client) *resourceCache {
func newResourceCache(cli client.Client, app *v1beta1.Application) *resourceCache {
    return &resourceCache{
        app: app,
        cli: cli,
        m:   map[string]*resourceCacheEntry{},
    }
@@ -97,9 +101,24 @@ func (cache *resourceCache) get(ctx context.Context, mr v1beta1.ManagedResource)
            entry.err = errors.Wrapf(err, "failed to get resource %s", key)
            }
        } else {
            entry.exists = true
            entry.exists = cache.exists(entry.obj)
        }
        entry.loaded = true
    }
    return entry
}

func (cache *resourceCache) exists(manifest *unstructured.Unstructured) bool {
    if cache.app == nil {
        return true
    }
    appKey, controlledBy := apply.GetAppKey(cache.app), apply.GetControlledBy(manifest)
    if appKey == controlledBy {
        return true
    }
    annotations := manifest.GetAnnotations()
    if annotations == nil || annotations[oam.AnnotationAppSharedBy] == "" {
        return false
    }
    return apply.ContainsSharer(annotations[oam.AnnotationAppSharedBy], cache.app)
}
@@ -21,37 +21,39 @@ import (
    "testing"

    "github.com/stretchr/testify/require"
    v13 "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"

    common2 "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
    apicommon "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
    "github.com/oam-dev/kubevela/pkg/oam"
    "github.com/oam-dev/kubevela/pkg/utils/common"
)

func TestResourceCache(t *testing.T) {
    cli := fake.NewClientBuilder().WithScheme(common.Scheme).Build()
    cache := newResourceCache(cli)
    cache := newResourceCache(cli, nil)
    r := require.New(t)
    createMR := func(name string) v1beta1.ManagedResource {
        return v1beta1.ManagedResource{
            ClusterObjectReference: common2.ClusterObjectReference{
                ObjectReference: v1.ObjectReference{
            ClusterObjectReference: apicommon.ClusterObjectReference{
                ObjectReference: corev1.ObjectReference{
                    Name:       name,
                    Kind:       "Deployment",
                    APIVersion: v13.SchemeGroupVersion.String(),
                    APIVersion: appsv1.SchemeGroupVersion.String(),
                },
            },
        }
    }
    r.NoError(cli.Create(context.Background(), &v13.Deployment{ObjectMeta: v12.ObjectMeta{Name: "resource-1"}}))
    r.NoError(cli.Create(context.Background(), &v13.Deployment{ObjectMeta: v12.ObjectMeta{Name: "resource-2"}}))
    r.NoError(cli.Create(context.Background(), &v13.Deployment{ObjectMeta: v12.ObjectMeta{Name: "resource-3"}}))
    r.NoError(cli.Create(context.Background(), &v13.Deployment{ObjectMeta: v12.ObjectMeta{Name: "resource-4"}}))
    r.NoError(cli.Create(context.Background(), &v13.Deployment{ObjectMeta: v12.ObjectMeta{Name: "resource-6"}}))
    ti := v12.Now()
    r.NoError(cli.Create(context.Background(), &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "resource-1"}}))
    r.NoError(cli.Create(context.Background(), &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "resource-2"}}))
    r.NoError(cli.Create(context.Background(), &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "resource-3"}}))
    r.NoError(cli.Create(context.Background(), &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "resource-4"}}))
    r.NoError(cli.Create(context.Background(), &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "resource-6"}}))
    ti := metav1.Now()
    rt1 := &v1beta1.ResourceTracker{
        Spec: v1beta1.ResourceTrackerSpec{
            ManagedResources: []v1beta1.ManagedResource{
@@ -61,7 +63,7 @@ func TestResourceCache(t *testing.T) {
        },
    }
    rt2 := &v1beta1.ResourceTracker{
        ObjectMeta: v12.ObjectMeta{DeletionTimestamp: &ti},
        ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &ti},
        Spec: v1beta1.ResourceTrackerSpec{
            ManagedResources: []v1beta1.ManagedResource{
                createMR("resource-1"),
@@ -126,3 +128,23 @@ func TestResourceCache(t *testing.T) {
        r.True(entry.loaded)
    }
}

func TestResourceCacheExistenceCheck(t *testing.T) {
    cli := fake.NewClientBuilder().WithScheme(common.Scheme).Build()
    cache := newResourceCache(cli, &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{
        Name:      "app",
        Namespace: "test",
    }})
    r := require.New(t)
    createResource := func(appName, appNs, sharedBy string) *unstructured.Unstructured {
        return &unstructured.Unstructured{Object: map[string]interface{}{
            "metadata": map[string]interface{}{
                "labels":      map[string]interface{}{oam.LabelAppName: appName, oam.LabelAppNamespace: appNs},
                "annotations": map[string]interface{}{oam.AnnotationAppSharedBy: sharedBy},
            },
        }}
    }
    r.True(cache.exists(createResource("app", "test", "")))
    r.False(cache.exists(createResource("app-no-shared-by", "test", "")))
    r.True(cache.exists(createResource("app-shared-by", "ex", "x/y,test/app,ex/app-shared-by")))
}
@@ -129,7 +129,11 @@ func (h *resourceKeeper) dispatch(ctx context.Context, manifests []*unstructured
    errs := parallel.Run(func(manifest *unstructured.Unstructured) error {
        applyCtx := multicluster.ContextWithClusterName(ctx, oam.GetCluster(manifest))
        applyCtx = auth.ContextWithUserInfo(applyCtx, h.app)
        return h.applicator.Apply(applyCtx, manifest, applyOpts...)
        ao := applyOpts
        if h.isShared(manifest) {
            ao = append([]apply.ApplyOption{apply.SharedByApp(h.app)}, ao...)
        }
        return h.applicator.Apply(applyCtx, manifest, ao...)
    }, manifests, MaxDispatchConcurrent)
    return velaerrors.AggregateErrors(errs.([]error))
}
@@ -40,8 +40,10 @@ import (
    "github.com/oam-dev/kubevela/pkg/monitor/metrics"
    "github.com/oam-dev/kubevela/pkg/multicluster"
    "github.com/oam-dev/kubevela/pkg/oam"
    "github.com/oam-dev/kubevela/pkg/oam/util"
    "github.com/oam-dev/kubevela/pkg/resourcetracker"
    "github.com/oam-dev/kubevela/pkg/utils"
    "github.com/oam-dev/kubevela/pkg/utils/apply"
    version2 "github.com/oam-dev/kubevela/version"
)
@@ -309,6 +311,22 @@ func (h *gcHandler) deleteIndependentComponent(ctx context.Context, mr v1beta1.M
    return nil
}

func (h *gcHandler) deleteSharedManagedResource(ctx context.Context, manifest *unstructured.Unstructured, sharedBy string) error {
    parts := strings.Split(apply.FirstSharer(sharedBy), "/")
    appName, appNs := "", metav1.NamespaceDefault
    if len(parts) == 1 {
        appName = parts[0]
    } else if len(parts) == 2 {
        appName, appNs = parts[1], parts[0]
    }
    util.AddAnnotations(manifest, map[string]string{oam.AnnotationAppSharedBy: sharedBy})
    util.AddLabels(manifest, map[string]string{
        oam.LabelAppName:      appName,
        oam.LabelAppNamespace: appNs,
    })
    return h.Client.Update(ctx, manifest)
}

func (h *gcHandler) deleteManagedResource(ctx context.Context, mr v1beta1.ManagedResource, rt *v1beta1.ResourceTracker) error {
    entry := h.cache.get(ctx, mr)
    if entry.gcExecutorRT != rt {
@@ -318,7 +336,17 @@ func (h *gcHandler) deleteManagedResource(ctx context.Context, mr v1beta1.Manage
        return entry.err
    }
    if entry.exists {
        if err := h.Client.Delete(multicluster.ContextWithClusterName(ctx, mr.Cluster), entry.obj); err != nil && !kerrors.IsNotFound(err) {
        _ctx := multicluster.ContextWithClusterName(ctx, mr.Cluster)
        if annotations := entry.obj.GetAnnotations(); annotations != nil && annotations[oam.AnnotationAppSharedBy] != "" {
            sharedBy := apply.RemoveSharer(annotations[oam.AnnotationAppSharedBy], h.app)
            if sharedBy != "" {
                if err := h.deleteSharedManagedResource(_ctx, entry.obj, sharedBy); err != nil {
                    return errors.Wrapf(err, "failed to remove sharer from resource %s", mr.ResourceKey())
                }
                return nil
            }
        }
        if err := h.Client.Delete(_ctx, entry.obj); err != nil && !kerrors.IsNotFound(err) {
            return errors.Wrapf(err, "failed to delete resource %s", mr.ResourceKey())
        }
    }
@@ -19,20 +19,23 @@ package resourcekeeper
import (
    "context"
    "encoding/json"
    "fmt"
    "testing"
    "time"

    "github.com/crossplane/crossplane-runtime/pkg/meta"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    v1 "k8s.io/api/core/v1"
    v13 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    corev1 "k8s.io/api/core/v1"
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
|
|||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/resourcetracker"
|
||||
"github.com/oam-dev/kubevela/pkg/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/apply"
|
||||
"github.com/oam-dev/kubevela/version"
|
||||
)
|
||||
|
||||
|
|
@@ -50,11 +55,11 @@ var _ = Describe("Test ResourceKeeper garbage collection", func() {

    BeforeEach(func() {
        namespace = "test-ns-" + utils.RandomString(4)
        Expect(testClient.Create(context.Background(), &v1.Namespace{ObjectMeta: v12.ObjectMeta{Name: namespace}})).Should(Succeed())
        Expect(testClient.Create(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})).Should(Succeed())
    })

    AfterEach(func() {
        ns := &v1.Namespace{}
        ns := &corev1.Namespace{}
        Expect(testClient.Get(context.Background(), types.NamespacedName{Name: namespace}, ns)).Should(Succeed())
        Expect(testClient.Delete(context.Background(), ns)).Should(Succeed())
    })
@@ -66,7 +71,7 @@ var _ = Describe("Test ResourceKeeper garbage collection", func() {
    cli := multicluster.NewFakeClient(testClient)
    cli.AddCluster("worker", workerClient)
    cli.AddCluster("worker-2", workerClient)
    app := &v1beta1.Application{ObjectMeta: v12.ObjectMeta{Name: "gc-app", Namespace: namespace}}
    app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "gc-app", Namespace: namespace}}
    bs, err := json.Marshal(&v1alpha1.EnvBindingSpec{
        Envs: []v1alpha1.EnvConfig{{
            Placement: v1alpha1.EnvPlacement{ClusterSelector: &common.ClusterSelector{Name: "worker"}},
@@ -134,13 +139,13 @@ var _ = Describe("Test ResourceKeeper garbage collection", func() {
    checkRTExists(multicluster.ContextWithClusterName(ctx, "worker-2"), rt4.GetName(), false)
    Expect(app.GetAnnotations()[oam.AnnotationKubeVelaVersion]).Should(Equal("v1.2.0"))

    crd := &v13.CustomResourceDefinition{}
    crd := &apiextensionsv1.CustomResourceDefinition{}
    Expect(workerClient.Get(ctx, types.NamespacedName{Name: "resourcetrackers.core.oam.dev"}, crd)).Should(Succeed())
    Expect(workerClient.Delete(ctx, crd)).Should(Succeed())
    Eventually(func(g Gomega) {
        g.Expect(workerClient.List(ctx, &v1beta1.ResourceTrackerList{})).ShouldNot(Succeed())
    }, 10*time.Second).Should(Succeed())
    v12.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationKubeVelaVersion, "master")
    metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationKubeVelaVersion, "master")
    version.VelaVersion = "master"
    Expect(cli.Update(ctx, app)).Should(Succeed())
    Expect(h.GarbageCollectLegacyResourceTrackers(ctx)).Should(Succeed())
@@ -151,4 +156,69 @@ var _ = Describe("Test ResourceKeeper garbage collection", func() {
        checkRTExists(ctx, rt5.GetName(), true)
    })

    It("Test gcHandler garbage collect shared resources", func() {
        ctx := context.Background()
        cli := testClient
        app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: namespace}}

        keeper := &resourceKeeper{
            Client:     cli,
            app:        app,
            applicator: apply.NewAPIApplicator(cli),
            cache:      newResourceCache(cli, app),
        }
        h := gcHandler{resourceKeeper: keeper, cfg: newGCConfig()}
        h._currentRT = &v1beta1.ResourceTracker{}
        t := metav1.Now()
        h._currentRT.SetDeletionTimestamp(&t)
        h._currentRT.SetFinalizers([]string{resourcetracker.Finalizer})
        createResource := func(name, appName, appNs, sharedBy string) *unstructured.Unstructured {
            return &unstructured.Unstructured{Object: map[string]interface{}{
                "apiVersion": "v1",
                "kind":       "ConfigMap",
                "metadata": map[string]interface{}{
                    "name":        name,
                    "namespace":   namespace,
                    "annotations": map[string]interface{}{oam.AnnotationAppSharedBy: sharedBy},
                    "labels": map[string]interface{}{
                        oam.LabelAppName:      appName,
                        oam.LabelAppNamespace: appNs,
                    },
                },
            }}
        }
        By("Test delete normal resource")
        o1 := createResource("o1", "app", namespace, "")
        h._currentRT.AddManagedResource(o1, false, "test")
        Expect(cli.Create(ctx, o1)).Should(Succeed())
        h.cache.registerResourceTrackers(h._currentRT)
        Expect(h.Finalize(ctx)).Should(Succeed())
        Eventually(func(g Gomega) {
            g.Expect(cli.Get(ctx, client.ObjectKeyFromObject(o1), o1)).Should(Satisfy(errors.IsNotFound))
        }, 5*time.Second).Should(Succeed())

        By("Test delete resource shared by others")
        o2 := createResource("o2", "app", namespace, fmt.Sprintf("%s/app,x/y", namespace))
        h._currentRT.AddManagedResource(o2, false, "test")
        Expect(cli.Create(ctx, o2)).Should(Succeed())
        h.cache.registerResourceTrackers(h._currentRT)
        Expect(h.Finalize(ctx)).Should(Succeed())
        Eventually(func(g Gomega) {
            g.Expect(cli.Get(ctx, client.ObjectKeyFromObject(o2), o2)).Should(Succeed())
            g.Expect(o2.GetAnnotations()[oam.AnnotationAppSharedBy]).Should(Equal("x/y"))
            g.Expect(o2.GetLabels()[oam.LabelAppNamespace]).Should(Equal("x"))
            g.Expect(o2.GetLabels()[oam.LabelAppName]).Should(Equal("y"))
        }, 5*time.Second).Should(Succeed())

        By("Test delete resource shared by self")
        o3 := createResource("o3", "app", namespace, fmt.Sprintf("%s/app", namespace))
        h._currentRT.AddManagedResource(o3, false, "test")
        Expect(cli.Create(ctx, o3)).Should(Succeed())
        h.cache.registerResourceTrackers(h._currentRT)
        Expect(h.Finalize(ctx)).Should(Succeed())
        Eventually(func(g Gomega) {
            Expect(cli.Get(ctx, client.ObjectKeyFromObject(o3), o3)).Should(Satisfy(errors.IsNotFound))
        }, 5*time.Second).Should(Succeed())
    })

})
@@ -22,9 +22,9 @@ import (
    "testing"

    "github.com/stretchr/testify/require"
    v13 "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
@@ -46,10 +46,10 @@ func TestResourceKeeperGarbageCollect(t *testing.T) {

    rtMaps := map[int64]*v1beta1.ResourceTracker{}
    cmMaps := map[int]*unstructured.Unstructured{}
    crMaps := map[int]*v13.ControllerRevision{}
    crMaps := map[int]*appsv1.ControllerRevision{}

    crRT := &v1beta1.ResourceTracker{
        ObjectMeta: v12.ObjectMeta{Name: "app-comp-rev", Labels: map[string]string{
        ObjectMeta: metav1.ObjectMeta{Name: "app-comp-rev", Labels: map[string]string{
            oam.LabelAppName:      "app",
            oam.LabelAppNamespace: "default",
            oam.LabelAppUID:       "uid",
@@ -62,7 +62,7 @@ func TestResourceKeeperGarbageCollect(t *testing.T) {

    createRT := func(gen int64) {
        _rt := &v1beta1.ResourceTracker{
            ObjectMeta: v12.ObjectMeta{Name: fmt.Sprintf("app-v%d", gen), Labels: map[string]string{
            ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("app-v%d", gen), Labels: map[string]string{
                oam.LabelAppName:      "app",
                oam.LabelAppNamespace: "default",
                oam.LabelAppUID:       "uid",
@@ -82,15 +82,17 @@ func TestResourceKeeperGarbageCollect(t *testing.T) {
            cm := &unstructured.Unstructured{}
            cm.SetName(fmt.Sprintf("cm-%d", i))
            cm.SetNamespace("default")
            cm.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ConfigMap"))
            cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
            cm.SetLabels(map[string]string{
                oam.LabelAppComponent: fmt.Sprintf("comp-%d", compID),
                oam.LabelAppNamespace: "default",
                oam.LabelAppName:      "app",
            })
            r.NoError(cli.Create(ctx, cm))
            cmMaps[i] = cm
        }
        if _, exists := crMaps[compID]; !exists {
            cr := &v13.ControllerRevision{Data: runtime.RawExtension{Raw: []byte(`{}`)}}
            cr := &appsv1.ControllerRevision{Data: runtime.RawExtension{Raw: []byte(`{}`)}}
            cr.SetName(fmt.Sprintf("cr-comp-%d", compID))
            cr.SetNamespace("default")
            cr.SetLabels(map[string]string{
@@ -111,7 +113,7 @@ func TestResourceKeeperGarbageCollect(t *testing.T) {
        n := 0
        for _, v := range cmMaps {
            o := &unstructured.Unstructured{}
            o.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ConfigMap"))
            o.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
            err := cli.Get(ctx, client.ObjectKeyFromObject(v), o)
            if err == nil {
                n += 1
@@ -121,14 +123,14 @@ func TestResourceKeeperGarbageCollect(t *testing.T) {
        _rts := &v1beta1.ResourceTrackerList{}
        r.NoError(cli.List(ctx, _rts))
        r.Equal(rtCount, len(_rts.Items))
        _crs := &v13.ControllerRevisionList{}
        _crs := &appsv1.ControllerRevisionList{}
        r.NoError(cli.List(ctx, _crs))
        r.Equal(crCount, len(_crs.Items))
    }

    createRK := func(gen int64, keepLegacy bool, order v1alpha1.GarbageCollectOrder, components ...apicommon.ApplicationComponent) *resourceKeeper {
        _rk, err := NewResourceKeeper(ctx, cli, &v1beta1.Application{
            ObjectMeta: v12.ObjectMeta{Name: "app", Namespace: "default", UID: "uid", Generation: gen},
            ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "default", UID: "uid", Generation: gen},
            Spec: v1beta1.ApplicationSpec{Components: components},
        })
        r.NoError(err)
|
@ -163,7 +165,7 @@ func TestResourceKeeperGarbageCollect(t *testing.T) {
|
|||
checkCount(7, 5, 6)
|
||||
|
||||
// delete rt2, trigger gc for cm3
|
||||
dt := v12.Now()
|
||||
dt := metav1.Now()
|
||||
rtMaps[2].SetDeletionTimestamp(&dt)
|
||||
r.NoError(cli.Update(ctx, rtMaps[2]))
|
||||
rk = createRK(4, true, "")
|
||||
|
|
|
|||
|
|
@@ -21,8 +21,8 @@ import (
    "sync"

    "github.com/pkg/errors"
    v1 "k8s.io/api/apps/v1"
    v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "sigs.k8s.io/controller-runtime/pkg/client"
@@ -43,8 +43,8 @@ type ResourceKeeper interface {
    StateKeep(context.Context) error
    ContainsResources([]*unstructured.Unstructured) bool

    DispatchComponentRevision(context.Context, *v1.ControllerRevision) error
    DeleteComponentRevision(context.Context, *v1.ControllerRevision) error
    DispatchComponentRevision(context.Context, *appsv1.ControllerRevision) error
    DeleteComponentRevision(context.Context, *appsv1.ControllerRevision) error
}

type resourceKeeper struct {
@@ -60,6 +60,7 @@ type resourceKeeper struct {

    applyOncePolicy      *v1alpha1.ApplyOncePolicySpec
    garbageCollectPolicy *v1alpha1.GarbageCollectPolicySpec
    sharedResourcePolicy *v1alpha1.SharedResourcePolicySpec

    cache *resourceCache
}
@@ -95,12 +96,15 @@ func (h *resourceKeeper) parseApplicationResourcePolicy() (err error) {
    if h.applyOncePolicy, err = policy.ParseApplyOncePolicy(h.app); err != nil {
        return errors.Wrapf(err, "failed to parse apply-once policy")
    }
    if h.applyOncePolicy == nil && v12.HasLabel(h.app.ObjectMeta, oam.LabelAddonName) {
    if h.applyOncePolicy == nil && metav1.HasLabel(h.app.ObjectMeta, oam.LabelAddonName) {
        h.applyOncePolicy = &v1alpha1.ApplyOncePolicySpec{Enable: true}
    }
    if h.garbageCollectPolicy, err = policy.ParseGarbageCollectPolicy(h.app); err != nil {
        return errors.Wrapf(err, "failed to parse garbage-collect policy")
    }
    if h.sharedResourcePolicy, err = policy.ParseSharedResourcePolicy(h.app); err != nil {
        return errors.Wrapf(err, "failed to parse shared-resource policy")
    }
    return nil
}
@@ -115,7 +119,7 @@ func NewResourceKeeper(ctx context.Context, cli client.Client, app *v1beta1.Appl
        Client:     cli,
        app:        app,
        applicator: apply.NewAPIApplicator(cli),
        cache:      newResourceCache(cli),
        cache:      newResourceCache(cli, app),
    }
    if err = h.loadResourceTrackers(ctx); err != nil {
        return nil, errors.Wrapf(err, "failed to load resourcetrackers")
@@ -64,7 +64,11 @@ func (h *resourceKeeper) StateKeep(ctx context.Context) error {
    if err != nil {
        return errors.Wrapf(err, "failed to apply once resource %s from resourcetracker %s", mr.ResourceKey(), rt.Name)
    }
    if err = h.applicator.Apply(applyCtx, manifest, apply.MustBeControlledByApp(h.app)); err != nil {
    ao := []apply.ApplyOption{apply.MustBeControlledByApp(h.app)}
    if h.isShared(manifest) {
        ao = append([]apply.ApplyOption{apply.SharedByApp(h.app)}, ao...)
    }
    if err = h.applicator.Apply(applyCtx, manifest, ao...); err != nil {
        return errors.Wrapf(err, "failed to re-apply resource %s from resourcetracker %s", mr.ResourceKey(), rt.Name)
    }
}
@@ -24,8 +24,8 @@ import (
    "github.com/crossplane/crossplane-runtime/pkg/fieldpath"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    v1 "k8s.io/api/core/v1"
    v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
@@ -40,66 +40,92 @@ var _ = Describe("Test ResourceKeeper StateKeep", func() {

var _ = Describe("Test ResourceKeeper StateKeep", func() {

    createConfigMapClusterObjectReference := func(name string) common.ClusterObjectReference {
        return common.ClusterObjectReference{
            ObjectReference: corev1.ObjectReference{
                Kind:       "ConfigMap",
                APIVersion: corev1.SchemeGroupVersion.String(),
                Name:       name,
                Namespace:  "default",
            },
        }
    }

    createConfigMapWithSharedBy := func(name string, ns string, appName string, sharedBy string, value string) *unstructured.Unstructured {
        o := &unstructured.Unstructured{
            Object: map[string]interface{}{
                "metadata": map[string]interface{}{
                    "name":      name,
                    "namespace": ns,
                    "labels": map[string]interface{}{
                        oam.LabelAppName:      appName,
                        oam.LabelAppNamespace: ns,
                    },
                    "annotations": map[string]interface{}{oam.AnnotationAppSharedBy: sharedBy},
                },
                "data": map[string]interface{}{
                    "key": value,
                },
            },
        }
        o.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
        return o
    }

    createConfigMap := func(name string, value string) *unstructured.Unstructured {
        return createConfigMapWithSharedBy(name, "default", "", "", value)
    }

    It("Test StateKeep for various scene", func() {
        cli := testClient
        createConfigMap := func(name string, value string) *unstructured.Unstructured {
            o := &unstructured.Unstructured{
                Object: map[string]interface{}{
                    "metadata": map[string]interface{}{
                        "name":      name,
                        "namespace": "default",
                    },
                    "data": map[string]interface{}{
                        "key": value,
                    },
                },
        setOwner := func(obj *unstructured.Unstructured) {
            labels := obj.GetLabels()
            if labels == nil {
                labels = map[string]string{}
            }
            o.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ConfigMap"))
            return o
            labels[oam.LabelAppName] = "app"
            labels[oam.LabelAppNamespace] = "default"
            obj.SetLabels(labels)
        }

        // state-keep add this resource
        cm1 := createConfigMap("cm1", "value")
        setOwner(cm1)
        cmRaw1, err := json.Marshal(cm1)
        Expect(err).Should(Succeed())

        // state-keep skip this resource
        cm2 := createConfigMap("cm2", "value")
        setOwner(cm2)
        Expect(cli.Create(context.Background(), cm2)).Should(Succeed())

        // state-keep delete this resource
        cm3 := createConfigMap("cm3", "value")
        setOwner(cm3)
        Expect(cli.Create(context.Background(), cm3)).Should(Succeed())

        // state-keep delete this resource
        cm4 := createConfigMap("cm4", "value")
        setOwner(cm4)
        cmRaw4, err := json.Marshal(cm4)
        Expect(err).Should(Succeed())
        Expect(cli.Create(context.Background(), cm4)).Should(Succeed())

        // state-keep update this resource
        cm5 := createConfigMap("cm5", "value")
        setOwner(cm5)
        cmRaw5, err := json.Marshal(cm5)
        Expect(err).Should(Succeed())
        cm5.Object["data"].(map[string]interface{})["key"] = "changed"
        Expect(cli.Create(context.Background(), cm5)).Should(Succeed())

        createConfigMapClusterObjectReference := func(name string) common.ClusterObjectReference {
            return common.ClusterObjectReference{
                ObjectReference: v1.ObjectReference{
                    Kind:       "ConfigMap",
                    APIVersion: v1.SchemeGroupVersion.String(),
                    Name:       name,
                    Namespace:  "default",
                },
            }
        }

        app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "default"}}
        h := &resourceKeeper{
            Client:     cli,
            app:        &v1beta1.Application{ObjectMeta: v13.ObjectMeta{Name: "app", Namespace: "default"}},
            app:        app,
            applicator: apply.NewAPIApplicator(cli),
            cache:      newResourceCache(cli),
            cache:      newResourceCache(cli, app),
        }

        h._currentRT = &v1beta1.ResourceTracker{
@@ -125,7 +151,7 @@ var _ = Describe("Test ResourceKeeper StateKeep", func() {

    Expect(h.StateKeep(context.Background())).Should(Succeed())
    cms := &unstructured.UnstructuredList{}
    cms.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("ConfigMap"))
    cms.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
    Expect(cli.List(context.Background(), cms, client.InNamespace("default"))).Should(Succeed())
    Expect(len(cms.Items)).Should(Equal(3))
    Expect(cms.Items[0].GetName()).Should(Equal("cm1"))
@@ -143,6 +169,49 @@ var _ = Describe("Test ResourceKeeper StateKeep", func() {
        Expect(err).ShouldNot(Succeed())
        Expect(err.Error()).Should(ContainSubstring("failed to re-apply"))
    })

    It("Test StateKeep for shared resources", func() {
        cli := testClient
        ctx := context.Background()
        Expect(cli.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-shared"}})).Should(Succeed())
        cm1 := createConfigMapWithSharedBy("cm1", "test-shared", "app", "test-shared/app", "x")
        cmRaw1, err := json.Marshal(cm1)
        Expect(err).Should(Succeed())
        cm2 := createConfigMapWithSharedBy("cm2", "test-shared", "app", "", "y")
        cmRaw2, err := json.Marshal(cm2)
        Expect(err).Should(Succeed())
        app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test-shared"}}
        h := &resourceKeeper{
            Client:     cli,
            app:        app,
            applicator: apply.NewAPIApplicator(cli),
            cache:      newResourceCache(cli, app),
        }
        h.sharedResourcePolicy = &v1alpha1.SharedResourcePolicySpec{Rules: []v1alpha1.SharedResourcePolicyRule{{
            Selector: v1alpha1.ResourcePolicyRuleSelector{ResourceTypes: []string{"ConfigMap"}},
        }}}
        h._currentRT = &v1beta1.ResourceTracker{
            Spec: v1beta1.ResourceTrackerSpec{
                ManagedResources: []v1beta1.ManagedResource{{
                    ClusterObjectReference: createConfigMapClusterObjectReference("cm1"),
                    Data:                   &runtime.RawExtension{Raw: cmRaw1},
                }, {
                    ClusterObjectReference: createConfigMapClusterObjectReference("cm2"),
                    Data:                   &runtime.RawExtension{Raw: cmRaw2},
                }},
            },
        }
        cm1 = createConfigMapWithSharedBy("cm1", "test-shared", "app", "test-shared/app,test-shared/another", "z")
        Expect(cli.Create(ctx, cm1)).Should(Succeed())
        cm2 = createConfigMapWithSharedBy("cm2", "test-shared", "another", "test-shared/another,test-shared/app", "z")
        Expect(cli.Create(ctx, cm2)).Should(Succeed())
        Expect(h.StateKeep(ctx)).Should(Succeed())
        Expect(cli.Get(ctx, client.ObjectKeyFromObject(cm1), cm1)).Should(Succeed())
        Expect(cm1.Object["data"].(map[string]interface{})["key"]).Should(Equal("x"))
        Expect(cli.Get(ctx, client.ObjectKeyFromObject(cm2), cm2)).Should(Succeed())
        Expect(cm2.Object["data"].(map[string]interface{})["key"]).Should(Equal("z"))
    })

    It("Test StateKeep for apply-once policy", func() {

        clusterManifest := &unstructured.Unstructured{}
@@ -213,7 +282,7 @@ var _ = Describe("Test ResourceKeeper StateKeep", func() {
            },
        },
    }
    o.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Deployment"))
    o.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Deployment"))
    return o
}
@@ -225,36 +294,37 @@ var _ = Describe("Test ResourceKeeper StateKeep", func() {

    createDeploymentClusterObjectReference := func(name string) common.ClusterObjectReference {
        return common.ClusterObjectReference{
            ObjectReference: v1.ObjectReference{
            ObjectReference: corev1.ObjectReference{
                Kind:       "Deployment",
                APIVersion: v1.SchemeGroupVersion.String(),
                APIVersion: corev1.SchemeGroupVersion.String(),
                Name:       name,
                Namespace:  "default",
            },
        }
    }

    app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "default"},
        Spec: v1beta1.ApplicationSpec{
            Components: []common.ApplicationComponent{
                {
                    Name:       "fourierapp03-comp-01",
                    Type:       "worker",
                    Properties: &runtime.RawExtension{Raw: []byte("{\"cmd\":[\"sleep\",\"1000\"],\"image\":\"busybox\"}")},
                },
            },
            Policies: []v1beta1.AppPolicy{
                {
                    Name:       "apply-once-01",
                    Type:       "apply-once",
                    Properties: &runtime.RawExtension{Raw: []byte(`{"enable": true,"rules": [{"selector": { "componentNames": ["fourierapp03-comp-01"], "resourceTypes": ["Deployment" ], "strategy": {"path": ["spec.replicas"] } }}]}`)},
                },
            },
        }}
    h := &resourceKeeper{
        Client: cli,
        app: &v1beta1.Application{ObjectMeta: v13.ObjectMeta{Name: "app", Namespace: "default"},
            Spec: v1beta1.ApplicationSpec{
                Components: []common.ApplicationComponent{
                    {
                        Name:       "fourierapp03-comp-01",
                        Type:       "worker",
                        Properties: &runtime.RawExtension{Raw: []byte("{\"cmd\":[\"sleep\",\"1000\"],\"image\":\"busybox\"}")},
                    },
                },
                Policies: []v1beta1.AppPolicy{
                    {
                        Name:       "apply-once-01",
                        Type:       "apply-once",
                        Properties: &runtime.RawExtension{Raw: []byte(`{"enable": true,"rules": [{"selector": { "componentNames": ["fourierapp03-comp-01"], "resourceTypes": ["Deployment" ], "strategy": {"path": ["spec.replicas"] } }}]}`)},
                    },
                },
            }},
        Client:     cli,
        app:        app,
        applicator: apply.NewAPIApplicator(cli),
        cache:      newResourceCache(cli),
        cache:      newResourceCache(cli, app),
        applyOncePolicy: &v1alpha1.ApplyOncePolicySpec{
            Enable: true,
            Rules: []v1alpha1.ApplyOncePolicyRule{{
|
|
@ -33,3 +33,10 @@ func (h *resourceKeeper) ClearNamespaceForClusterScopedResources(manifests []*un
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *resourceKeeper) isShared(manifest *unstructured.Unstructured) bool {
|
||||
if h.sharedResourcePolicy == nil {
|
||||
return false
|
||||
}
|
||||
return h.sharedResourcePolicy.FindStrategy(manifest)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,11 +32,12 @@ var _ = Describe("Test ResourceKeeper utilities", func() {
|
|||
|
||||
It("Test ClearNamespaceForClusterScopedResources", func() {
|
||||
cli := testClient
|
||||
app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "default"}}
|
||||
h := &resourceKeeper{
|
||||
Client: cli,
|
||||
app: &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "default"}},
|
||||
app: app,
|
||||
applicator: apply.NewAPIApplicator(cli),
|
||||
cache: newResourceCache(cli),
|
||||
cache: newResourceCache(cli, app),
|
||||
}
|
||||
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "vela"}}
|
||||
nsObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ns)
|
||||
|
|
|
|||
|
|
@@ -18,6 +18,7 @@ package apply

import (
    "context"
    "encoding/json"
    "fmt"

    "github.com/pkg/errors"
@@ -50,6 +51,7 @@ type Applicator interface {
}

type applyAction struct {
    isShared         bool
    skipUpdate       bool
    updateAnnotation bool
}
@@ -239,7 +241,7 @@ func executeApplyOptions(act *applyAction, existing, desired client.Object, aos
// NotUpdateRenderHashEqual skips the apply if the render hash of the new object equals the old hash.
func NotUpdateRenderHashEqual() ApplyOption {
    return func(act *applyAction, existing, desired client.Object) error {
        if existing == nil || desired == nil {
        if existing == nil || desired == nil || act.isShared {
            return nil
        }
        newSt, ok := desired.(*unstructured.Unstructured)
@@ -279,28 +281,41 @@ func MustBeControllableBy(u types.UID) ApplyOption {

 // MustBeControlledByApp requires that the existing object is controlled by the given application
 func MustBeControlledByApp(app *v1beta1.Application) ApplyOption {
-	return func(_ *applyAction, existing, _ client.Object) error {
-		if existing == nil {
+	return func(act *applyAction, existing, _ client.Object) error {
+		if existing == nil || act.isShared {
 			return nil
 		}
-		labels := existing.GetLabels()
-		if labels == nil {
-			return nil
-		}
-		if appName, exists := labels[oam.LabelAppName]; exists && appName != app.Name {
-			return fmt.Errorf("existing object is managed by other application %s", appName)
-		}
-		ns := app.Namespace
-		if ns == "" {
-			ns = metav1.NamespaceDefault
-		}
-		if appNs, exists := labels[oam.LabelAppNamespace]; exists && appNs != ns {
-			return fmt.Errorf("existing object is managed by other application %s/%s", appNs, labels[oam.LabelAppName])
+		appKey, controlledBy := GetAppKey(app), GetControlledBy(existing)
+		if controlledBy != "" && controlledBy != appKey {
+			return fmt.Errorf("existing object is managed by other application %s", controlledBy)
 		}
 		return nil
 	}
 }

+// GetControlledBy extracts the key of the application that controls the current resource
+func GetControlledBy(existing client.Object) string {
+	labels := existing.GetLabels()
+	if labels == nil {
+		return ""
+	}
+	appName := labels[oam.LabelAppName]
+	appNs := labels[oam.LabelAppNamespace]
+	if appName == "" || appNs == "" {
+		return ""
+	}
+	return fmt.Sprintf("%s/%s", appNs, appName)
+}
+
+// GetAppKey constructs the key identifying the application
+func GetAppKey(app *v1beta1.Application) string {
+	ns := app.Namespace
+	if ns == "" {
+		ns = metav1.NamespaceDefault
+	}
+	return fmt.Sprintf("%s/%s", ns, app.GetName())
+}
+
 // MakeCustomApplyOption lets users generate an ApplyOption that restricts how the change is applied.
 func MakeCustomApplyOption(f func(existing, desired client.Object) error) ApplyOption {
 	return func(act *applyAction, existing, desired client.Object) error {

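The refactor above replaces the old label-by-label comparison with a single key comparison via the two new helpers. A runnable sketch of the same logic, with the oam label constants inlined (app.oam.dev/name and app.oam.dev/namespace are assumed here to be the values those constants carry):

package main

import "fmt"

// Assumed label keys, mirroring oam.LabelAppName and oam.LabelAppNamespace.
const (
	labelAppName      = "app.oam.dev/name"
	labelAppNamespace = "app.oam.dev/namespace"
)

// appKey mirrors GetAppKey: an empty namespace defaults to "default".
func appKey(namespace, name string) string {
	if namespace == "" {
		namespace = "default"
	}
	return fmt.Sprintf("%s/%s", namespace, name)
}

// controlledBy mirrors GetControlledBy: both labels must be present,
// otherwise the resource is treated as uncontrolled.
func controlledBy(labels map[string]string) string {
	name, ns := labels[labelAppName], labels[labelAppNamespace]
	if name == "" || ns == "" {
		return ""
	}
	return fmt.Sprintf("%s/%s", ns, name)
}

func main() {
	owner := controlledBy(map[string]string{
		labelAppName:      "app1",
		labelAppNamespace: "default",
	})
	fmt.Println(owner)                       // default/app1
	fmt.Println(owner == appKey("", "app1")) // true: app1 controls the resource
}
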
@@ -315,3 +330,43 @@ func DisableUpdateAnnotation() ApplyOption {
 		return nil
 	}
 }
+
+// SharedByApp marks the resource as sharable by the given application
+func SharedByApp(app *v1beta1.Application) ApplyOption {
+	return func(act *applyAction, existing, desired client.Object) error {
+		// calculate the shared-by annotation:
+		// if the resource exists, add the current application to its shared-by field
+		var sharedBy string
+		if existing != nil && existing.GetAnnotations() != nil {
+			sharedBy = existing.GetAnnotations()[oam.AnnotationAppSharedBy]
+		}
+		sharedBy = AddSharer(sharedBy, app)
+		util.AddAnnotations(desired, map[string]string{oam.AnnotationAppSharedBy: sharedBy})
+		if existing == nil {
+			return nil
+		}
+
+		// the resource exists and is controlled by the current application
+		appKey, controlledBy := GetAppKey(app), GetControlledBy(existing)
+		if controlledBy == "" || appKey == controlledBy {
+			return nil
+		}
+
+		// the resource exists but is not controlled by the current application
+		if existing.GetAnnotations() == nil || existing.GetAnnotations()[oam.AnnotationAppSharedBy] == "" {
+			// the application that controls the resource does not allow sharing
+			return fmt.Errorf("resource is controlled by %s but is not sharable", controlledBy)
+		}
+		// the controlling application allows sharing: keep its spec and only mutate the shared-by annotation
+		act.isShared = true
+		bs, err := json.Marshal(existing)
+		if err != nil {
+			return err
+		}
+		if err = json.Unmarshal(bs, desired); err != nil {
+			return err
+		}
+		util.AddAnnotations(desired, map[string]string{oam.AnnotationAppSharedBy: sharedBy})
+		return nil
+	}
+}

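SharedByApp has three outcomes: apply as owner, join an already-shared resource, or be rejected. A hedged walk-through that mirrors its branch structure on plain strings instead of client.Object:

package main

import "fmt"

// decide mirrors the branches of SharedByApp above: owner is the
// controlledBy key of the existing resource ("" if the resource is new),
// sharedBy is its shared-by annotation, self is the applying app's key.
func decide(owner, sharedBy, self string) (string, error) {
	switch {
	case owner == "" || owner == self:
		return "apply normally as owner", nil
	case sharedBy == "":
		return "", fmt.Errorf("resource is controlled by %s but is not sharable", owner)
	default:
		return "keep owner's spec, append " + self + " to shared-by", nil
	}
}

func main() {
	fmt.Println(decide("", "", "default/app"))                           // create: become owner
	fmt.Println(decide("default/app", "", "default/app"))                // update own resource
	fmt.Println(decide("default/other", "", "default/app"))              // rejected: not shared
	fmt.Println(decide("default/other", "default/other", "default/app")) // join as sharer
}
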
@@ -426,6 +426,134 @@ func TestMustBeControlledByApp(t *testing.T) {
 	}
 }

+func TestSharedByApp(t *testing.T) {
+	app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "app"}}
+	ao := SharedByApp(app)
+	testCases := map[string]struct {
+		existing client.Object
+		desired  client.Object
+		output   client.Object
+		hasError bool
+	}{
+		"create new resource": {
+			existing: nil,
+			desired: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+			}},
+			output: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+				"metadata": map[string]interface{}{
+					"annotations": map[string]interface{}{oam.AnnotationAppSharedBy: "default/app"},
+				},
+			}},
+		},
+		"add sharer to existing resource": {
+			existing: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+			}},
+			desired: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+			}},
+			output: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+				"metadata": map[string]interface{}{
+					"annotations": map[string]interface{}{oam.AnnotationAppSharedBy: "default/app"},
+				},
+			}},
+		},
+		"add sharer to existing sharing resource": {
+			existing: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+				"metadata": map[string]interface{}{
+					"labels": map[string]interface{}{
+						oam.LabelAppName:      "example",
+						oam.LabelAppNamespace: "default",
+					},
+					"annotations": map[string]interface{}{oam.AnnotationAppSharedBy: "x/y"},
+				},
+				"data": "x",
+			}},
+			desired: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+				"data": "y",
+			}},
+			output: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+				"metadata": map[string]interface{}{
+					"labels": map[string]interface{}{
+						oam.LabelAppName:      "example",
+						oam.LabelAppNamespace: "default",
+					},
+					"annotations": map[string]interface{}{oam.AnnotationAppSharedBy: "x/y,default/app"},
+				},
+				"data": "x",
+			}},
+		},
+		"add sharer to existing sharing resource owned by self": {
+			existing: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+				"metadata": map[string]interface{}{
+					"labels": map[string]interface{}{
+						oam.LabelAppName:      "app",
+						oam.LabelAppNamespace: "default",
+					},
+					"annotations": map[string]interface{}{oam.AnnotationAppSharedBy: "default/app,x/y"},
+				},
+				"data": "x",
+			}},
+			desired: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+				"metadata": map[string]interface{}{
+					"labels": map[string]interface{}{
+						oam.LabelAppName:      "app",
+						oam.LabelAppNamespace: "default",
+					},
+					"annotations": map[string]interface{}{oam.AnnotationAppSharedBy: "default/app"},
+				},
+				"data": "y",
+			}},
+			output: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+				"metadata": map[string]interface{}{
+					"labels": map[string]interface{}{
+						oam.LabelAppName:      "app",
+						oam.LabelAppNamespace: "default",
+					},
+					"annotations": map[string]interface{}{oam.AnnotationAppSharedBy: "default/app,x/y"},
+				},
+				"data": "y",
+			}},
+		},
+		"add sharer to existing non-sharing resource": {
+			existing: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+				"metadata": map[string]interface{}{
+					"labels": map[string]interface{}{
+						oam.LabelAppName:      "example",
+						oam.LabelAppNamespace: "default",
+					},
+				},
+			}},
+			desired: &unstructured.Unstructured{Object: map[string]interface{}{
+				"kind": "ConfigMap",
+			}},
+			hasError: true,
+		},
+	}
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			r := require.New(t)
+			err := ao(&applyAction{}, tc.existing, tc.desired)
+			if tc.hasError {
+				r.Error(err)
+			} else {
+				r.NoError(err)
+				r.Equal(tc.output, tc.desired)
+			}
+		})
+	}
+}
+
 func TestFilterSpecialAnn(t *testing.T) {
 	var cm = &corev1.ConfigMap{}
 	var sc = &corev1.Secret{}

@@ -0,0 +1,65 @@
+/*
+Copyright 2022 The KubeVela Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apply
+
+import (
+	"strings"
+
+	"k8s.io/utils/strings/slices"
+
+	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
+)
+
+const (
+	sharedBySep = ","
+)
+
+// AddSharer adds the application to the shared-by string
+func AddSharer(sharedBy string, app *v1beta1.Application) string {
+	key := GetAppKey(app)
+	sharers := strings.Split(sharedBy, sharedBySep)
+	existing := slices.Contains(sharers, key)
+	if !existing {
+		sharers = append(slices.Filter(nil, sharers, func(s string) bool {
+			return s != ""
+		}), key)
+	}
+	return strings.Join(sharers, sharedBySep)
+}
+
+// ContainsSharer checks whether the shared-by annotation contains the target application
+func ContainsSharer(sharedBy string, app *v1beta1.Application) bool {
+	key := GetAppKey(app)
+	sharers := strings.Split(sharedBy, sharedBySep)
+	return slices.Contains(sharers, key)
+}
+
+// FirstSharer returns the first sharer recorded in the shared-by string
+func FirstSharer(sharedBy string) string {
+	sharers := strings.Split(sharedBy, sharedBySep)
+	return sharers[0]
+}
+
+// RemoveSharer removes the application from the shared-by string
+func RemoveSharer(sharedBy string, app *v1beta1.Application) string {
+	key := GetAppKey(app)
+	sharers := strings.Split(sharedBy, sharedBySep)
+	sharers = slices.Filter(nil, sharers, func(s string) bool {
+		return s != key
+	})
+	return strings.Join(sharers, sharedBySep)
+}

@@ -0,0 +1,41 @@
+/*
+Copyright 2022 The KubeVela Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apply
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
+)
+
+func TestShare(t *testing.T) {
+	r := require.New(t)
+	app := &v1beta1.Application{}
+	app.SetName("app")
+	app.SetNamespace("test")
+	r.Equal("test/app", AddSharer("", app))
+	r.Equal("test/app,x/y", AddSharer("test/app,x/y", app))
+	r.Equal("x/y,test/app", AddSharer("x/y", app))
+	r.True(ContainsSharer("a/b,test/app,x/y", app))
+	r.False(ContainsSharer("a/b,x/y", app))
+	r.Equal("a/b", FirstSharer("a/b,x/y"))
+	r.Equal("", FirstSharer(""))
+	r.Equal("a/b,x/y", RemoveSharer("a/b,test/app,x/y", app))
+	r.Equal("a/b,x/y", RemoveSharer("a/b,x/y", app))
+}

@@ -501,5 +501,82 @@ var _ = Describe("Test multicluster scenario", func() {
 				g.Expect(kerrors.IsNotFound(err)).Should(BeTrue())
 			}, 2*time.Minute).Should(Succeed())
 		})
+
+		It("Test applications sharing resources", func() {
+			createApp := func(name string) *v1beta1.Application {
+				return &v1beta1.Application{
+					ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
+					Spec: v1beta1.ApplicationSpec{
+						Components: []common.ApplicationComponent{{
+							Name:       "shared-resource-" + name,
+							Type:       "k8s-objects",
+							Properties: &runtime.RawExtension{Raw: []byte(`{"objects":[{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"shared"},"data":{"key":"value"}}]}`)},
+						}, {
+							Name:       "no-shared-resource-" + name,
+							Type:       "k8s-objects",
+							Properties: &runtime.RawExtension{Raw: []byte(`{"objects":[{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"non-shared-` + name + `"},"data":{"key":"value"}}]}`)},
+						}},
+						Policies: []v1beta1.AppPolicy{{
+							Type:       "shared-resource",
+							Name:       "shared-resource",
+							Properties: &runtime.RawExtension{Raw: []byte(`{"rules":[{"selector":{"componentNames":["shared-resource-` + name + `"]}}]}`)},
+						}},
+					},
+				}
+			}
+			app1 := createApp("app1")
+			Expect(k8sClient.Create(hubCtx, app1)).Should(Succeed())
+			Eventually(func(g Gomega) {
+				g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app1), app1)).Should(Succeed())
+				g.Expect(app1.Status.Phase).Should(Equal(common.ApplicationRunning))
+			}, 10*time.Second).Should(Succeed())
+			app2 := createApp("app2")
+			Expect(k8sClient.Create(hubCtx, app2)).Should(Succeed())
+			Eventually(func(g Gomega) {
+				g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app2), app2)).Should(Succeed())
+				g.Expect(app2.Status.Phase).Should(Equal(common.ApplicationRunning))
+			}, 10*time.Second).Should(Succeed())
+			app3 := createApp("app3")
+			Expect(k8sClient.Create(hubCtx, app3)).Should(Succeed())
+			Eventually(func(g Gomega) {
+				g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app3), app3)).Should(Succeed())
+				g.Expect(app3.Status.Phase).Should(Equal(common.ApplicationRunning))
+			}, 10*time.Second).Should(Succeed())
+			Eventually(func(g Gomega) {
+				cm := &corev1.ConfigMap{}
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "shared"}, cm)).Should(Succeed())
+				g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).Should(SatisfyAll(ContainSubstring("app1"), ContainSubstring("app2"), ContainSubstring("app3")))
+				g.Expect(cm.GetLabels()[oam.LabelAppName]).Should(SatisfyAny(Equal("app1"), Equal("app2"), Equal("app3")))
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app1"}, &corev1.ConfigMap{})).Should(Succeed())
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app2"}, &corev1.ConfigMap{})).Should(Succeed())
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app3"}, &corev1.ConfigMap{})).Should(Succeed())
+			}, 45*time.Second).Should(Succeed())
+			Expect(k8sClient.Delete(hubCtx, app2)).Should(Succeed())
+			Eventually(func(g Gomega) {
+				cm := &corev1.ConfigMap{}
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "shared"}, cm)).Should(Succeed())
+				g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).Should(SatisfyAll(ContainSubstring("app1"), ContainSubstring("app3")))
+				g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).ShouldNot(SatisfyAny(ContainSubstring("app2")))
+				g.Expect(cm.GetLabels()[oam.LabelAppName]).Should(SatisfyAny(Equal("app1"), Equal("app3")))
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app1"}, &corev1.ConfigMap{})).Should(Succeed())
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app2"}, &corev1.ConfigMap{})).Should(Satisfy(kerrors.IsNotFound))
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app3"}, &corev1.ConfigMap{})).Should(Succeed())
+			}, 10*time.Second).Should(Succeed())
+			Expect(k8sClient.Delete(hubCtx, app1)).Should(Succeed())
+			Eventually(func(g Gomega) {
+				cm := &corev1.ConfigMap{}
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "shared"}, cm)).Should(Succeed())
+				g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).Should(SatisfyAll(ContainSubstring("app3")))
+				g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).ShouldNot(SatisfyAny(ContainSubstring("app1"), ContainSubstring("app2")))
+				g.Expect(cm.GetLabels()[oam.LabelAppName]).Should(Equal("app3"))
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app1"}, &corev1.ConfigMap{})).Should(Satisfy(kerrors.IsNotFound))
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app3"}, &corev1.ConfigMap{})).Should(Succeed())
+			}, 10*time.Second).Should(Succeed())
+			Expect(k8sClient.Delete(hubCtx, app3)).Should(Succeed())
+			Eventually(func(g Gomega) {
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "shared"}, &corev1.ConfigMap{})).Should(Satisfy(kerrors.IsNotFound))
+				g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app3"}, &corev1.ConfigMap{})).Should(Satisfy(kerrors.IsNotFound))
+			}, 10*time.Second).Should(Succeed())
+		})
 	})
 })
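The e2e test above encodes the expected lifecycle: all three apps share one ConfigMap, deleting a sharer only shrinks the shared-by annotation, ownership passes to a remaining sharer (presumably the first one, matching FirstSharer), and the resource is recycled once the last sharer is gone. A toy model of that observed handoff, not the controller code:

package main

import (
	"fmt"
	"strings"
)

// sharedResource models just the two fields the e2e assertions inspect:
// the app-name label (owner) and the parsed shared-by annotation.
type sharedResource struct {
	owner    string
	sharedBy []string
}

// deleteApp removes app from the sharers; it reports false once no sharer
// is left, i.e. the resource itself would be garbage collected.
func (r *sharedResource) deleteApp(app string) bool {
	kept := make([]string, 0, len(r.sharedBy))
	for _, s := range r.sharedBy {
		if s != app {
			kept = append(kept, s)
		}
	}
	r.sharedBy = kept
	if len(r.sharedBy) == 0 {
		return false
	}
	if r.owner == app {
		r.owner = r.sharedBy[0] // hand ownership to the first remaining sharer
	}
	return true
}

func main() {
	r := &sharedResource{owner: "app1", sharedBy: []string{"app1", "app2", "app3"}}
	r.deleteApp("app2")
	fmt.Println(r.owner, strings.Join(r.sharedBy, ",")) // app1 app1,app3
	r.deleteApp("app1")
	fmt.Println(r.owner, strings.Join(r.sharedBy, ",")) // app3 app3
	fmt.Println(r.deleteApp("app3")) // false: the shared ConfigMap is finally removed
}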