Feat: health scope controller supports checking workload-managing traits (#2527)

* Feat: health scope controller support trait manage-workload

* Test: add multi-cluster and health policy e2e-test

Fix: e2e-test

lint

Fix: all e2e-test

Fix: modify port back

middle commit

middle pr

Fix: e2e-rollout-test

make reviewable

defer to rollout to apply the gvk annotation

fix e2e

* Update test/e2e-multicluster-test/multicluster_rollout_test.go

Co-authored-by: Hongchao Deng <hongchaodeng1@gmail.com>

Co-authored-by: Hongchao Deng <hongchaodeng1@gmail.com>
This commit is contained in:
wyike 2021-10-26 19:51:53 +08:00 committed by GitHub
parent 4dd18d1fc3
commit f48da214e3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 258 additions and 66 deletions

View File

@ -43,6 +43,7 @@ import (
common2 "github.com/oam-dev/kubevela/pkg/controller/common"
core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha1/envbinding"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
@ -236,7 +237,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Render", err), common.ApplicationRendering)
}
handler.handleCheckManageWorkloadTrait(handler.currentAppRev.Spec.TraitDefinitions, comps)
assemble.HandleCheckManageWorkloadTrait(*handler.currentAppRev, comps)
if err := handler.HandleComponentsRevision(ctx, comps); err != nil {
klog.ErrorS(err, "Failed to handle compoents revision", "application", klog.KObj(app))

View File

@ -37,7 +37,6 @@ import (
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationrollout"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/oam"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)
@ -304,26 +303,6 @@ func (h *AppHandler) aggregateHealthStatus(appFile *appfile.Appfile) ([]common.A
return appStatus, healthy, nil
}
// handleCheckManageWorkloadTrait marks every rendered trait that manages its
// component's workload. It first collects the names of all trait definitions
// whose spec sets ManageWorkload, then stamps oam.LabelManageWorkloadTrait="true"
// onto each trait of one of those types so downstream controllers can recognize
// that the trait, not the application, owns the workload.
func (h *AppHandler) handleCheckManageWorkloadTrait(traitDefs map[string]v1beta1.TraitDefinition, comps []*types.ComponentManifest) {
// set of trait-definition names that declare ManageWorkload
manageWorkloadTrait := map[string]bool{}
for traitName, definition := range traitDefs {
if definition.Spec.ManageWorkload {
manageWorkloadTrait[traitName] = true
}
}
// fast path: no definition manages a workload, nothing to label
if len(manageWorkloadTrait) == 0 {
return
}
for _, comp := range comps {
for _, trait := range comp.Traits {
// the trait's type name is recorded in its labels under oam.TraitTypeLabel
traitType := trait.GetLabels()[oam.TraitTypeLabel]
if manageWorkloadTrait[traitType] {
// merge keeps all existing labels and adds the manage-workload marker
trait.SetLabels(oamutil.MergeMapOverrideWithDst(trait.GetLabels(), map[string]string{oam.LabelManageWorkloadTrait: "true"}))
}
}
}
}
func generateScopeReference(scopes []appfile.Scope) []corev1.ObjectReference {
var references []corev1.ObjectReference
for _, scope := range scopes {

View File

@ -30,7 +30,6 @@ import (
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -41,7 +40,6 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
velatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/oam"
)
const workloadDefinition = `
@ -217,37 +215,3 @@ var _ = Describe("Test statusAggregate", func() {
Expect(err).Should(BeNil())
})
})
// Unit test for AppHandler.handleCheckManageWorkloadTrait: verifies that only
// traits whose definition sets ManageWorkload receive the manage-workload label,
// while other traits are left untouched.
var _ = Describe("Test handleCheckManageWorkloadTrait func", func() {
It("Test every situation", func() {
// "rollout" manages its workload, "normal" does not
traitDefs := map[string]v1beta1.TraitDefinition{
"rollout": v1beta1.TraitDefinition{
Spec: v1beta1.TraitDefinitionSpec{
ManageWorkload: true,
},
},
"normal": v1beta1.TraitDefinition{
Spec: v1beta1.TraitDefinitionSpec{},
},
}
// each trait carries its type in oam.TraitTypeLabel, as the renderer would set it
rolloutTrait := &unstructured.Unstructured{}
rolloutTrait.SetLabels(map[string]string{oam.TraitTypeLabel: "rollout"})
normalTrait := &unstructured.Unstructured{}
normalTrait.SetLabels(map[string]string{oam.TraitTypeLabel: "normal"})
comps := []*velatypes.ComponentManifest{
{
Traits: []*unstructured.Unstructured{
rolloutTrait,
normalTrait,
},
},
}
h := AppHandler{}
h.handleCheckManageWorkloadTrait(traitDefs, comps)
// rollout trait gains the manage-workload label (type label + marker = 2 labels)
Expect(len(rolloutTrait.GetLabels())).Should(BeEquivalentTo(2))
Expect(rolloutTrait.GetLabels()[oam.LabelManageWorkloadTrait]).Should(BeEquivalentTo("true"))
// normal trait keeps only its type label and no marker
Expect(len(normalTrait.GetLabels())).Should(BeEquivalentTo(1))
Expect(normalTrait.GetLabels()[oam.LabelManageWorkloadTrait]).Should(BeEquivalentTo(""))
})
})

View File

@ -232,6 +232,9 @@ func PrepareBeforeApply(comp *types.ComponentManifest, appRev *v1beta1.Applicati
}
assembledTraits := make([]*unstructured.Unstructured, len(comp.Traits))
HandleCheckManageWorkloadTrait(*appRev, []*types.ComponentManifest{comp})
for i, trait := range comp.Traits {
setTraitLabels(trait, additionalLabel)
assembledTraits[i] = trait
@ -329,3 +332,25 @@ func setTraitLabels(trait *unstructured.Unstructured, additionalLabels map[strin
// add more trait-specific labels here
util.AddLabels(trait, additionalLabels)
}
// HandleCheckManageWorkloadTrait checks every trait of every component against
// the application revision's trait definitions; any trait whose definition sets
// ManageWorkload is labeled with oam.LabelManageWorkloadTrait="true" so that
// downstream controllers can tell the trait owns the component's workload.
func HandleCheckManageWorkloadTrait(appRev v1beta1.ApplicationRevision, comps []*types.ComponentManifest) {
traitDefs := appRev.Spec.TraitDefinitions
// set of trait-definition names that declare ManageWorkload
manageWorkloadTrait := map[string]bool{}
for traitName, definition := range traitDefs {
if definition.Spec.ManageWorkload {
manageWorkloadTrait[traitName] = true
}
}
// fast path: no definition manages a workload, nothing to label
if len(manageWorkloadTrait) == 0 {
return
}
for _, comp := range comps {
for _, trait := range comp.Traits {
// the trait's type name is recorded in its labels under oam.TraitTypeLabel
traitType := trait.GetLabels()[oam.TraitTypeLabel]
if manageWorkloadTrait[traitType] {
// merge keeps all existing labels and adds the manage-workload marker
trait.SetLabels(util.MergeMapOverrideWithDst(trait.GetLabels(), map[string]string{oam.LabelManageWorkloadTrait: "true"}))
}
}
}
}

View File

@ -26,6 +26,7 @@ import (
"sigs.k8s.io/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
velatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/oam"
)
@ -203,3 +204,49 @@ var _ = Describe("Test Assemble Options", func() {
Expect(wl.GetName()).Should(Equal(workloadName))
})
})
// Unit test for assemble.HandleCheckManageWorkloadTrait: builds an
// ApplicationRevision whose trait definitions include one workload-managing
// trait ("rollout") and one plain trait ("normal"), and verifies that only the
// managing trait receives the manage-workload label.
var _ = Describe("Test handleCheckManageWorkloadTrait func", func() {
It("Test every situation", func() {
// "rollout" manages its workload, "normal" does not
traitDefs := map[string]v1beta1.TraitDefinition{
"rollout": v1beta1.TraitDefinition{
Spec: v1beta1.TraitDefinitionSpec{
ManageWorkload: true,
},
},
"normal": v1beta1.TraitDefinition{
Spec: v1beta1.TraitDefinitionSpec{},
},
}
// the function reads trait definitions from the revision spec
appRev := v1beta1.ApplicationRevision{
Spec: v1beta1.ApplicationRevisionSpec{
TraitDefinitions: traitDefs,
},
}
// each trait carries its type in oam.TraitTypeLabel, as the renderer would set it
rolloutTrait := &unstructured.Unstructured{}
rolloutTrait.SetLabels(map[string]string{oam.TraitTypeLabel: "rollout"})
normalTrait := &unstructured.Unstructured{}
normalTrait.SetLabels(map[string]string{oam.TraitTypeLabel: "normal"})
workload := unstructured.Unstructured{}
workload.SetLabels(map[string]string{
oam.WorkloadTypeLabel: "webservice",
})
comps := []*velatypes.ComponentManifest{
{
Traits: []*unstructured.Unstructured{
rolloutTrait,
normalTrait,
},
StandardWorkload: &workload,
},
}
HandleCheckManageWorkloadTrait(appRev, comps)
// rollout trait gains the manage-workload label (type label + marker = 2 labels)
Expect(len(rolloutTrait.GetLabels())).Should(BeEquivalentTo(2))
Expect(rolloutTrait.GetLabels()[oam.LabelManageWorkloadTrait]).Should(BeEquivalentTo("true"))
// normal trait keeps only its type label and no marker
Expect(len(normalTrait.GetLabels())).Should(BeEquivalentTo(1))
Expect(normalTrait.GetLabels()[oam.LabelManageWorkloadTrait]).Should(BeEquivalentTo(""))
})
})

View File

@ -29,6 +29,7 @@ import (
"github.com/pkg/errors"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@ -300,7 +301,7 @@ func getAppConfigNameFromLabel(o metav1.Object) string {
func getVersioningPeerWorkloadRefs(ctx context.Context, c client.Reader, wlRef core.ObjectReference, ns string) ([]core.ObjectReference, error) {
o := &unstructured.Unstructured{}
o.SetGroupVersionKind(wlRef.GroupVersionKind())
if err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: wlRef.Name}, o); err != nil {
if err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: wlRef.Name}, o); err != nil && !apierrors.IsNotFound(err) {
return nil, err
}

View File

@ -18,6 +18,7 @@ package healthscope
import (
"context"
"encoding/json"
"sort"
"strings"
"sync"
@ -613,12 +614,34 @@ func (r *Reconciler) createWorkloadRefs(ctx context.Context, appRef v1alpha2.App
}, o); err != nil {
continue
}
if labels := o.GetLabels(); labels != nil && labels[oam.WorkloadTypeLabel] != "" {
wlRefs = append(wlRefs, WorkloadReference{
ObjectReference: rs.ObjectReference,
clusterName: rs.Cluster,
envName: decisionsMap[rs.Cluster],
})
if labels := o.GetLabels(); labels != nil {
if labels[oam.WorkloadTypeLabel] != "" {
wlRefs = append(wlRefs, WorkloadReference{
ObjectReference: rs.ObjectReference,
clusterName: rs.Cluster,
envName: decisionsMap[rs.Cluster],
})
} else if labels[oam.TraitTypeLabel] != "" && labels[oam.LabelManageWorkloadTrait] == "true" {
// this means this trait is a manage-Workload trait, get workload GVK and name for trait's annotation
objectRef := corev1.ObjectReference{}
err := json.Unmarshal([]byte(o.GetAnnotations()[oam.AnnotationWorkloadGVK]), &objectRef)
if err != nil {
// don't break whole check process due to this error
continue
}
if o.GetAnnotations() != nil && len(o.GetAnnotations()[oam.AnnotationWorkloadName]) != 0 {
objectRef.Name = o.GetAnnotations()[oam.AnnotationWorkloadName]
} else {
// use component name as default
objectRef.Name = labels[oam.LabelAppComponent]
}
wlRefs = append(wlRefs, WorkloadReference{
ObjectReference: objectRef,
clusterName: rs.Cluster,
envName: decisionsMap[rs.Cluster],
})
}
}
}
}

View File

@ -18,6 +18,7 @@ package rollout
import (
"context"
"encoding/json"
"github.com/pkg/errors"
@ -34,6 +35,8 @@ import (
common2 "github.com/oam-dev/kubevela/pkg/controller/common"
rolloutplan "github.com/oam-dev/kubevela/pkg/controller/common/rollout"
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/oam"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/apply"
)
@ -110,6 +113,20 @@ func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return ctrl.Result{}, err
}
if rollout.Status.RollingState == v1alpha1.LocatingTargetAppState {
if rollout.GetAnnotations() == nil || rollout.GetAnnotations()[oam.AnnotationWorkloadName] != h.targetWorkload.GetName() {
gvk := map[string]string{"apiVersion": h.targetWorkload.GetAPIVersion(), "kind": h.targetWorkload.GetKind()}
gvkValue, _ := json.Marshal(gvk)
rollout.SetAnnotations(oamutil.MergeMapOverrideWithDst(rollout.GetAnnotations(),
map[string]string{oam.AnnotationWorkloadName: h.targetWorkload.GetName(), oam.AnnotationWorkloadGVK: string(gvkValue)}))
klog.InfoS("rollout controller set targetWorkload ", h.targetWorkload.GetName(),
"in annotation in rollout namespace: ", rollout.Namespace, " name", rollout.Name, "gvk", gvkValue)
// exit current reconcile before create target workload, this reconcile don't update status just modify annotation
// next round reconcile will create workload and pass `LocatingTargetAppState` phase
return ctrl.Result{}, h.Update(ctx, rollout)
}
}
switch rollout.Status.RollingState {
case v1alpha1.RolloutDeletingState:
removed, err := h.checkWorkloadNotExist(ctx)

View File

@ -125,4 +125,10 @@ const (
// AnnotationLastAppliedConfiguration is kubectl annotations for 3-way merge
AnnotationLastAppliedConfiguration = "kubectl.kubernetes.io/last-applied-configuration"
// AnnotationWorkloadGVK indicates the managed workload's GVK by trait
AnnotationWorkloadGVK = "trait.oam.dev/workload-gvk"
// AnnotationWorkloadName indicates the managed workload's name by trait
AnnotationWorkloadName = "trait.oam.dev/workload-name"
)

View File

@ -66,7 +66,7 @@ func main() {
"Determines the namespace in which the leader election configmap will be created.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.")
flag.StringVar(&healthAddr, "health-addr", ":19440", "The address the health endpoint binds to.")
flag.Parse()
// setup logging

View File

@ -20,6 +20,7 @@ import (
"context"
"fmt"
"io/ioutil"
"strings"
"time"
. "github.com/onsi/ginkgo"
@ -143,5 +144,70 @@ var _ = Describe("Test MultiClustet Rollout", func() {
}, 500*time.Millisecond, 30*time.Second).Should(BeNil())
verifySucceed(componentName + "-v1")
})
It("Test Rollout with health check policy, guarantee health scope controller work ", func() {
app := &v1beta1.Application{}
appYaml, err := ioutil.ReadFile("./testdata/app/multi-cluster-health-policy.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
app.SetNamespace(namespace)
err = k8sClient.Create(hubCtx, app)
Expect(err).Should(Succeed())
verifySucceed(componentName + "-v1")
Eventually(func() error {
checkApp := v1beta1.Application{}
if err := k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, &checkApp); err != nil {
return err
}
if len(checkApp.Status.Services) == 0 {
return fmt.Errorf("app status service haven't write back")
}
compStatus := checkApp.Status.Services[0]
if compStatus.Env != "staging" {
return fmt.Errorf("comp status env miss-match")
}
if !compStatus.Healthy {
return fmt.Errorf("comp status not healthy")
}
if !strings.Contains(compStatus.Message, "Ready:2/2") {
return fmt.Errorf("comp status workload check don't work")
}
return nil
}, 300*time.Millisecond, 30*time.Second).Should(BeNil())
By("update application to v2")
checkApp := &v1beta1.Application{}
Eventually(func() error {
if err := k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image": "stefanprodan/podinfo:5.0.2"}`)
if err := k8sClient.Update(hubCtx, checkApp); err != nil {
return err
}
return nil
}, 500*time.Millisecond, 30*time.Second).Should(BeNil())
verifySucceed(componentName + "-v2")
Eventually(func() error {
// Note: KubeVela will only check the workload of the target revision
checkApp := v1beta1.Application{}
if err := k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, &checkApp); err != nil {
return err
}
if len(checkApp.Status.Services) == 0 {
return fmt.Errorf("app status service haven't write back")
}
compStatus := checkApp.Status.Services[0]
if compStatus.Env != "staging" {
return fmt.Errorf("comp status env miss-match")
}
if !compStatus.Healthy {
return fmt.Errorf("comp status not healthy")
}
if !strings.Contains(compStatus.Message, "Ready:2/2") {
return fmt.Errorf("comp status workload check don't work")
}
return nil
}, 300*time.Millisecond, 30*time.Second).Should(BeNil())
})
})
})

View File

@ -0,0 +1,48 @@
# Test fixture: a multi-cluster Application that rolls out a webservice
# component via the rollout trait, binds it to a staging cluster with the
# env-binding policy, and attaches a health policy so the health scope
# controller probes the trait-managed workload.
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: example-app-rollout
  namespace: default
spec:
  components:
    - name: hello-world-server
      # NOTE(review): the original file declared `type: webservice` twice in this
      # mapping (a duplicate key rejected by strict YAML parsers); kept once here.
      type: webservice
      properties:
        image: crccheck/hello-world
        port: 8000
      traits:
        - type: rollout
          properties:
            targetSize: 2
            rolloutBatches:
              - replicas: 1
              - replicas: 1
  policies:
    - name: example-multi-env-policy
      type: env-binding
      properties:
        envs:
          - name: staging
            placement: # select the target cluster and use the default release strategy
              clusterSelector:
                name: cluster-worker
    - name: health-policy-demo
      type: health
      properties:
        probeInterval: 5
        probeTimeout: 10
  workflow:
    steps:
      # deploy to the staging environment
      - name: deploy-staging
        type: deploy2env
        properties:
          policy: example-multi-env-policy
          env: staging

View File

@ -18,9 +18,12 @@ package controllers_test
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/oam-dev/kubevela/pkg/oam"
"sigs.k8s.io/yaml"
v1 "k8s.io/api/apps/v1"
@ -113,10 +116,22 @@ var _ = Describe("rollout related e2e-test,rollout trait test", func() {
return fmt.Errorf("error rollout status state %s", rollout.Status.RollingState)
}
compRevName = rollout.Spec.TargetRevisionName
if rollout.GetAnnotations() == nil || rollout.GetAnnotations()[oam.AnnotationWorkloadName] != componentRevision {
return fmt.Errorf("target workload name annotation missmatch want %s acctually %s",
rollout.GetAnnotations()[oam.AnnotationWorkloadName], componentRevision)
}
deployKey := types.NamespacedName{Namespace: namespaceName, Name: compRevName}
if err := k8sClient.Get(ctx, deployKey, &targerDeploy); err != nil {
return err
}
gvkStr := rollout.GetAnnotations()[oam.AnnotationWorkloadGVK]
gvk := map[string]string{}
if err := json.Unmarshal([]byte(gvkStr), &gvk); err != nil {
return err
}
if gvk["apiVersion"] != "apps/v1" || gvk["kind"] != "Deployment" {
return fmt.Errorf("error targetWorkload gvk")
}
if *targerDeploy.Spec.Replicas != *rollout.Spec.RolloutPlan.TargetSize {
return fmt.Errorf("targetDeploy replicas missMatch %d, %d", targerDeploy.Spec.Replicas, rollout.Spec.RolloutPlan.TargetSize)
}