Chore: remove legacy rollout and scope (#6068)

* Chore: remove legacy rollout & scope

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* remove outdated params

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* fix

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

---------

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
Somefive, 2023-06-05 10:57:38 +08:00, committed by GitHub
parent 057e640ce2
commit f15eba2c5f
99 changed files with 40 additions and 5761 deletions

.github/CODEOWNERS

@@ -16,10 +16,6 @@ pkg/stdlib @leejanee @FogDong @Somefive
# Owner of Workflow
pkg/workflow @leejanee @FogDong @Somefive @wangyikewxgm @chivalryq
# Owner of rollout
pkg/controller/common/rollout/ @wangyikewxgm @wonderflow
runtime/rollout @wangyikewxgm @wonderflow
# Owner of vela templates
vela-templates/ @Somefive @barnettZQG @wonderflow @FogDong @wangyikewxgm @chivalryq


@@ -7,10 +7,6 @@ on:
- "v*"
workflow_dispatch: {}
env:
ACCESS_KEY: ${{ secrets.OSS_ACCESS_KEY }}
ACCESS_KEY_SECRET: ${{ secrets.OSS_ACCESS_KEY_SECRET }}
permissions:
contents: read

.gitignore

@@ -50,8 +50,6 @@ tmp/
# check docs
git-page/
# e2e rollout runtime image build
runtime/rollout/e2e/tmp
vela.json
dist/


@@ -88,10 +88,6 @@ ifneq ($(shell docker images -q $(VELA_CORE_TEST_IMAGE)),)
docker rmi -f $(VELA_CORE_TEST_IMAGE)
endif
ifneq ($(shell docker images -q $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)),)
docker rmi -f $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)
endif
endif
## image-load: load docker image to the kind cluster
@@ -99,14 +95,6 @@ image-load:
docker build -t $(VELA_CORE_TEST_IMAGE) -f Dockerfile.e2e .
kind load docker-image $(VELA_CORE_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_CORE_TEST_IMAGE)"; exit 1; }
## image-load-runtime-cluster: Load the run-time cluster image
image-load-runtime-cluster:
/bin/sh hack/e2e/build_runtime_rollout.sh
docker build -t $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) -f runtime/rollout/e2e/Dockerfile.e2e runtime/rollout/e2e/
rm -rf runtime/rollout/e2e/tmp
kind load docker-image $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)"; exit 1; }
kind load docker-image $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) --name=$(RUNTIME_CLUSTER_NAME) || echo "no worker cluster"
## core-test: Run tests
core-test:
go test ./pkg/... -coverprofile cover.out
@@ -115,10 +103,6 @@ core-test:
manager:
$(GOBUILD_ENV) go build -o bin/manager -a -ldflags $(LDFLAGS) ./cmd/core/main.go
## vela-runtime-rollout-manager: Build vela runtime rollout manager binary
vela-runtime-rollout-manager:
$(GOBUILD_ENV) go build -o ./runtime/rollout/bin/manager -a -ldflags $(LDFLAGS) ./runtime/rollout/cmd/main.go
## manifests: Generate manifests e.g. CRD, RBAC etc.
manifests: installcue kustomize
go generate $(foreach t,pkg apis,./$(t)/...)


@@ -29,7 +29,6 @@ import (
workflowv1alpha1 "github.com/kubevela/workflow/api/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/oam"
)
@@ -358,19 +357,6 @@ const (
WorkflowStepType DefinitionType = "WorkflowStep"
)
// AppRolloutStatus defines the observed state of AppRollout
type AppRolloutStatus struct {
v1alpha1.RolloutStatus `json:",inline"`
// LastUpgradedTargetAppRevision contains the name of the app that we upgraded to
// We will restart the rollout if this is not the same as the spec
LastUpgradedTargetAppRevision string `json:"lastTargetAppRevision"`
// LastSourceAppRevision contains the name of the app that we need to upgrade from.
// We will restart the rollout if this is not the same as the spec
LastSourceAppRevision string `json:"LastSourceAppRevision,omitempty"`
}
// ApplicationTrait defines the trait of application
type ApplicationTrait struct {
Type string `json:"type"`
@@ -414,29 +400,11 @@ type ClusterSelector struct {
Labels map[string]string `json:"labels,omitempty"`
}
// Distribution defines the replica distribution of an AppRevision to a cluster.
type Distribution struct {
// Replicas is the replica number.
Replicas int `json:"replicas,omitempty"`
}
// ClusterPlacement defines the cluster placement rules for an app revision.
type ClusterPlacement struct {
// ClusterSelector selects the cluster to deploy apps to.
// If not specified, it indicates the host cluster per se.
ClusterSelector *ClusterSelector `json:"clusterSelector,omitempty"`
// Distribution defines the replica distribution of an AppRevision to a cluster.
Distribution Distribution `json:"distribution,omitempty"`
}
const (
// PolicyResourceCreator create the policy resource.
PolicyResourceCreator string = "policy"
// WorkflowResourceCreator create the resource in workflow.
WorkflowResourceCreator string = "workflow"
// DebugResourceCreator create the debug resource.
DebugResourceCreator string = "debug"
)
// OAMObjectReference defines the object reference for an oam resource
@@ -533,8 +501,6 @@ const (
RenderCondition
// WorkflowCondition indicates whether workflow processing is successful.
WorkflowCondition
// RolloutCondition indicates whether rollout processing is successful.
RolloutCondition
// ReadyCondition indicates whether whole application processing is successful.
ReadyCondition
)
@@ -545,7 +511,6 @@ var conditions = map[ApplicationConditionType]string{
PolicyCondition: "Policy",
RenderCondition: "Render",
WorkflowCondition: "Workflow",
RolloutCondition: "Rollout",
ReadyCondition: "Ready",
}


@@ -28,22 +28,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppRolloutStatus) DeepCopyInto(out *AppRolloutStatus) {
*out = *in
in.RolloutStatus.DeepCopyInto(&out.RolloutStatus)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppRolloutStatus.
func (in *AppRolloutStatus) DeepCopy() *AppRolloutStatus {
if in == nil {
return nil
}
out := new(AppRolloutStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppStatus) DeepCopyInto(out *AppStatus) {
*out = *in
@@ -257,27 +241,6 @@ func (in *ClusterObjectReference) DeepCopy() *ClusterObjectReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterPlacement) DeepCopyInto(out *ClusterPlacement) {
*out = *in
if in.ClusterSelector != nil {
in, out := &in.ClusterSelector, &out.ClusterSelector
*out = new(ClusterSelector)
(*in).DeepCopyInto(*out)
}
out.Distribution = in.Distribution
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPlacement.
func (in *ClusterPlacement) DeepCopy() *ClusterPlacement {
if in == nil {
return nil
}
out := new(ClusterPlacement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSelector) DeepCopyInto(out *ClusterSelector) {
*out = *in
@@ -315,21 +278,6 @@ func (in *DefinitionReference) DeepCopy() *DefinitionReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Distribution) DeepCopyInto(out *Distribution) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Distribution.
func (in *Distribution) DeepCopy() *Distribution {
if in == nil {
return nil
}
out := new(Distribution)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Helm) DeepCopyInto(out *Helm) {
*out = *in


@@ -29,18 +29,6 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
)
const (
// TypeHealthy indicates that an application is determined to be healthy by a health scope.
TypeHealthy condition.ConditionType = "Healthy"
)
// Reasons an application is or is not healthy
const (
ReasonHealthy condition.ConditionReason = "AllComponentsHealthy"
ReasonUnhealthy condition.ConditionReason = "UnhealthyOrUnknownComponents"
ReasonHealthCheckErr condition.ConditionReason = "HealthCheckeError"
)
// AppPolicy defines a global policy for all components in the app.
type AppPolicy struct {
// Name is the unique name of the policy.
@@ -74,8 +62,6 @@ type ApplicationSpec struct {
// - will have a context in annotation.
// - should mark "finish" phase in status.conditions.
Workflow *Workflow `json:"workflow,omitempty"`
// TODO(wonderflow): we should have application level scopes supported here
}
// +kubebuilder:object:root=true


@@ -53,18 +53,12 @@ type ApplicationRevisionCompressibleFields struct {
// TraitDefinitions records the snapshot of the traitDefinitions related with the created/modified Application
TraitDefinitions map[string]*TraitDefinition `json:"traitDefinitions,omitempty"`
// ScopeDefinitions records the snapshot of the scopeDefinitions related with the created/modified Application
ScopeDefinitions map[string]ScopeDefinition `json:"scopeDefinitions,omitempty"`
// PolicyDefinitions records the snapshot of the PolicyDefinitions related with the created/modified Application
PolicyDefinitions map[string]PolicyDefinition `json:"policyDefinitions,omitempty"`
// WorkflowStepDefinitions records the snapshot of the WorkflowStepDefinitions related with the created/modified Application
WorkflowStepDefinitions map[string]*WorkflowStepDefinition `json:"workflowStepDefinitions,omitempty"`
// ScopeGVK records the apiVersion to GVK mapping
ScopeGVK map[string]metav1.GroupVersionKind `json:"scopeGVK,omitempty"`
// Policies records the external policies
Policies map[string]v1alpha1.Policy `json:"policies,omitempty"`


@@ -38,8 +38,6 @@ func TestApplicationRevisionCompression(t *testing.T) {
spec.WorkloadDefinitions["def"] = WorkloadDefinition{Spec: WorkloadDefinitionSpec{Reference: common.DefinitionReference{Name: "testdef"}}}
spec.TraitDefinitions = make(map[string]*TraitDefinition)
spec.TraitDefinitions["def"] = &TraitDefinition{Spec: TraitDefinitionSpec{ControlPlaneOnly: true}}
spec.ScopeDefinitions = make(map[string]ScopeDefinition)
spec.ScopeDefinitions["def"] = ScopeDefinition{Spec: ScopeDefinitionSpec{AllowComponentOverlap: true}}
spec.PolicyDefinitions = make(map[string]PolicyDefinition)
spec.PolicyDefinitions["def"] = PolicyDefinition{Spec: PolicyDefinitionSpec{ManageHealthCheck: true}}
spec.WorkflowStepDefinitions = make(map[string]*WorkflowStepDefinition)


@@ -232,49 +232,3 @@ type TraitDefinitionList struct {
metav1.ListMeta `json:"metadata,omitempty"`
Items []TraitDefinition `json:"items"`
}
// A ScopeDefinitionSpec defines the desired state of a ScopeDefinition.
type ScopeDefinitionSpec struct {
// Reference to the CustomResourceDefinition that defines this scope kind.
Reference common.DefinitionReference `json:"definitionRef"`
// WorkloadRefsPath indicates if/where a scope accepts workloadRef objects
WorkloadRefsPath string `json:"workloadRefsPath,omitempty"`
// AllowComponentOverlap specifies whether an OAM component may exist in
// multiple instances of this kind of scope.
AllowComponentOverlap bool `json:"allowComponentOverlap"`
// Extension is used for extension needs by OAM platform builders
// +optional
// +kubebuilder:pruning:PreserveUnknownFields
Extension *runtime.RawExtension `json:"extension,omitempty"`
}
// +kubebuilder:object:root=true
// A ScopeDefinition registers a kind of Kubernetes custom resource as a valid
// OAM scope kind by referencing its CustomResourceDefinition. The CRD is used
// to validate the schema of the scope when it is embedded in an OAM
// ApplicationConfiguration.
// +kubebuilder:printcolumn:JSONPath=".spec.definitionRef.name",name=DEFINITION-NAME,type=string
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=scope
// +kubebuilder:storageversion
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ScopeDefinitionSpec `json:"spec,omitempty"`
}
// +kubebuilder:object:root=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ScopeDefinitionList contains a list of ScopeDefinition.
type ScopeDefinitionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ScopeDefinition `json:"items"`
}
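For context on the scope API being dropped here, a minimal sketch of how a ScopeDefinition object was typically assembled before this change; the import paths follow the core.oam.dev packages referenced elsewhere in this commit, and the CRD name and workloadRefsPath values are illustrative placeholders, not taken from the KubeVela codebase.

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)

// newScopeDefinition wraps a hypothetical scope CRD in the (now-removed)
// ScopeDefinition type. The CRD name and workloadRefsPath are example values.
func newScopeDefinition() v1beta1.ScopeDefinition {
	return v1beta1.ScopeDefinition{
		ObjectMeta: metav1.ObjectMeta{Name: "examplescopes.example.oam.dev"},
		Spec: v1beta1.ScopeDefinitionSpec{
			// Reference points at the CRD that backs this scope kind.
			Reference: common.DefinitionReference{Name: "examplescopes.example.oam.dev"},
			// WorkloadRefsPath tells the runtime where the scope accepts workloadRef objects.
			WorkloadRefsPath: "spec.workloadRefs",
			// Allow one component to appear in multiple instances of this scope kind.
			AllowComponentOverlap: true,
		},
	}
}
```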


@@ -107,14 +107,6 @@ var (
ApplicationRevisionGroupVersionKind = SchemeGroupVersion.WithKind(ApplicationRevisionKind)
)
// ScopeDefinition type metadata.
var (
ScopeDefinitionKind = reflect.TypeOf(ScopeDefinition{}).Name()
ScopeDefinitionGroupKind = schema.GroupKind{Group: Group, Kind: ScopeDefinitionKind}.String()
ScopeDefinitionKindAPIVersion = ScopeDefinitionKind + "." + SchemeGroupVersion.String()
ScopeDefinitionGroupVersionKind = SchemeGroupVersion.WithKind(ScopeDefinitionKind)
)
// ResourceTracker type metadata.
var (
ResourceTrackerKind = reflect.TypeOf(ResourceTracker{}).Name()
@@ -130,7 +122,6 @@ func init() {
SchemeBuilder.Register(&PolicyDefinition{}, &PolicyDefinitionList{})
SchemeBuilder.Register(&WorkflowStepDefinition{}, &WorkflowStepDefinitionList{})
SchemeBuilder.Register(&DefinitionRevision{}, &DefinitionRevisionList{})
SchemeBuilder.Register(&ScopeDefinition{}, &ScopeDefinitionList{})
SchemeBuilder.Register(&Application{}, &ApplicationList{})
SchemeBuilder.Register(&ApplicationRevision{}, &ApplicationRevisionList{})
SchemeBuilder.Register(&ResourceTracker{}, &ResourceTrackerList{})


@@ -23,7 +23,6 @@ package v1beta1
import (
"github.com/kubevela/workflow/api/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
@@ -177,13 +176,6 @@ func (in *ApplicationRevisionCompressibleFields) DeepCopyInto(out *ApplicationRe
(*out)[key] = outVal
}
}
if in.ScopeDefinitions != nil {
in, out := &in.ScopeDefinitions, &out.ScopeDefinitions
*out = make(map[string]ScopeDefinition, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.PolicyDefinitions != nil {
in, out := &in.PolicyDefinitions, &out.PolicyDefinitions
*out = make(map[string]PolicyDefinition, len(*in))
@@ -206,13 +198,6 @@ func (in *ApplicationRevisionCompressibleFields) DeepCopyInto(out *ApplicationRe
(*out)[key] = outVal
}
}
if in.ScopeGVK != nil {
in, out := &in.ScopeGVK, &out.ScopeGVK
*out = make(map[string]v1.GroupVersionKind, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Policies != nil {
in, out := &in.Policies, &out.Policies
*out = make(map[string]core_oam_devv1alpha1.Policy, len(*in))
@@ -806,85 +791,6 @@ func (in *ResourceTrackerStatus) DeepCopy() *ResourceTrackerStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeDefinition) DeepCopyInto(out *ScopeDefinition) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDefinition.
func (in *ScopeDefinition) DeepCopy() *ScopeDefinition {
if in == nil {
return nil
}
out := new(ScopeDefinition)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScopeDefinition) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeDefinitionList) DeepCopyInto(out *ScopeDefinitionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ScopeDefinition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDefinitionList.
func (in *ScopeDefinitionList) DeepCopy() *ScopeDefinitionList {
if in == nil {
return nil
}
out := new(ScopeDefinitionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScopeDefinitionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeDefinitionSpec) DeepCopyInto(out *ScopeDefinitionSpec) {
*out = *in
out.Reference = in.Reference
if in.Extension != nil {
in, out := &in.Extension, &out.Extension
*out = new(runtime.RawExtension)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDefinitionSpec.
func (in *ScopeDefinitionSpec) DeepCopy() *ScopeDefinitionSpec {
if in == nil {
return nil
}
out := new(ScopeDefinitionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TraitDefinition) DeepCopyInto(out *TraitDefinition) {
*out = *in


@@ -1,43 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 contains API Schema definitions for the standard v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=standard.oam.dev
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
const (
// GroupName of the CRDs
GroupName = "standard.oam.dev"
// Version of the group of CRDs
Version = "v1alpha1"
)
var (
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)


@@ -1,35 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"reflect"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Rollout type metadata
var (
RolloutKind = reflect.TypeOf(Rollout{}).Name()
RolloutGroupKind = schema.GroupKind{Group: GroupName, Kind: RolloutKind}.String()
RolloutKindAPIVersion = RolloutKind + "." + SchemeGroupVersion.String()
RolloutKindVersionKind = SchemeGroupVersion.WithKind(RolloutKind)
)
func init() {
SchemeBuilder.Register(&Rollout{}, &RolloutList{})
}
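For reference, a minimal sketch of how the (now-deleted) standard.oam.dev/v1alpha1 group registration above was typically consumed, assuming the same import path that this commit removes from the common types file:

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	standardv1alpha1 "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
)

// buildScheme registers the built-in Kubernetes types plus the (pre-removal)
// standard.oam.dev/v1alpha1 types such as Rollout into a single scheme.
func buildScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		return nil, err
	}
	if err := standardv1alpha1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}
```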


@@ -1,285 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
)
// RolloutStrategyType defines strategies for pods rollout
type RolloutStrategyType string
const (
// IncreaseFirstRolloutStrategyType indicates that we increase the target resources first
IncreaseFirstRolloutStrategyType RolloutStrategyType = "IncreaseFirst"
// DecreaseFirstRolloutStrategyType indicates that we decrease the source resources first
DecreaseFirstRolloutStrategyType RolloutStrategyType = "DecreaseFirst"
)
// HookType can be pre, post or during rollout
type HookType string
const (
// InitializeRolloutHook execute webhook during the rollout initializing phase
InitializeRolloutHook HookType = "initialize-rollout"
// PreBatchRolloutHook execute webhook before each batch rollout
PreBatchRolloutHook HookType = "pre-batch-rollout"
// PostBatchRolloutHook execute webhook after each batch rollout
PostBatchRolloutHook HookType = "post-batch-rollout"
// FinalizeRolloutHook execute the webhook during the rollout finalizing phase
FinalizeRolloutHook HookType = "finalize-rollout"
)
// RollingState is the overall rollout state
type RollingState string
const (
// LocatingTargetAppState indicates that the rollout is in the stage of locating target app
// we use this state to make sure we special handle the target app successfully only once
LocatingTargetAppState RollingState = "locatingTargetApp"
// VerifyingSpecState indicates that the rollout is in the stage of verifying the rollout settings
// and the controller can locate both the target and the source
VerifyingSpecState RollingState = "verifyingSpec"
// InitializingState indicates that the rollout is initializing all the new resources
InitializingState RollingState = "initializing"
// RollingInBatchesState indicates that the rollout starts rolling
RollingInBatchesState RollingState = "rollingInBatches"
// FinalisingState indicates that the rollout is finalizing, possibly cleaning up the old resources and adjusting traffic
FinalisingState RollingState = "finalising"
// RolloutFailingState indicates that the rollout is failing
// one needs to finalize it before marking it as failed, by cleaning up the old resources and adjusting traffic
RolloutFailingState RollingState = "rolloutFailing"
// RolloutSucceedState indicates that the rollout successfully completed to match the desired target state
RolloutSucceedState RollingState = "rolloutSucceed"
// RolloutAbandoningState indicates that the rollout is being abandoned
// we need to finalize it by cleaning up the old resources, adjusting traffic and returning control back to its owner
RolloutAbandoningState RollingState = "rolloutAbandoning"
// RolloutDeletingState indicates that the rollout is being deleted
// we need to finalize it by cleaning up the old resources, adjusting traffic and returning control back to its owner
RolloutDeletingState RollingState = "RolloutDeletingState"
// RolloutFailedState indicates that the rollout failed and the target replica count was not reached
// we cannot move forward anymore; we will let the client decide when or whether to revert.
RolloutFailedState RollingState = "rolloutFailed"
)
// BatchRollingState is the sub state when the rollout is on the fly
type BatchRollingState string
const (
// BatchInitializingState indicates that the batch is still initializing; the batch rolling has not started yet
BatchInitializingState BatchRollingState = "batchInitializing"
// BatchInRollingState still rolling the batch, the batch rolling is not completed yet
BatchInRollingState BatchRollingState = "batchInRolling"
// BatchVerifyingState verifying if the application is ready to roll.
BatchVerifyingState BatchRollingState = "batchVerifying"
// BatchRolloutFailedState indicates that the batch didn't get the manual or automatic approval
BatchRolloutFailedState BatchRollingState = "batchVerifyFailed"
// BatchFinalizingState indicates that all the pods in the batch are available; we can move on to the next batch
BatchFinalizingState BatchRollingState = "batchFinalizing"
// BatchReadyState indicates that all the pods in the batch are upgraded and the batch state is ready
BatchReadyState BatchRollingState = "batchReady"
)
// RolloutPlan defines the details of the rollout plan
type RolloutPlan struct {
// RolloutStrategy defines strategies for the rollout plan
// The default is IncreaseFirstRolloutStrategyType
// +optional
RolloutStrategy RolloutStrategyType `json:"rolloutStrategy,omitempty"`
// The size of the target resource. The default is the same
// as the size of the source resource.
// +optional
TargetSize *int32 `json:"targetSize,omitempty"`
// The number of batches, default = 1
// +optional
NumBatches *int32 `json:"numBatches,omitempty"`
// The exact distribution among batches.
// its size has to be exactly the same as the NumBatches (if set)
// The total number cannot exceed the targetSize or the size of the source resource
// We will IGNORE the last batch's replica field if it's a percentage since round errors can lead to inaccurate sum
// We highly recommend leaving the last batch's replica field empty
// +optional
RolloutBatches []RolloutBatch `json:"rolloutBatches,omitempty"`
// All pods in the batches up to the batchPartition (included) will have
// the target resource specification while the rest still have the source resource
// This is designed for the operators to manually roll out
// Default is the number of batches, which will roll out all the batches
// +optional
BatchPartition *int32 `json:"batchPartition,omitempty"`
// Paused the rollout, default is false
// +optional
Paused bool `json:"paused,omitempty"`
// RolloutWebhooks provide a way for the rollout to interact with an external process
// +optional
RolloutWebhooks []RolloutWebhook `json:"rolloutWebhooks,omitempty"`
// CanaryMetric provides a way for the rollout process to automatically check certain metrics
// before completing the process
// +optional
CanaryMetric []CanaryMetric `json:"canaryMetric,omitempty"`
}
// RolloutBatch is used to describe how each batch rollout should be performed
type RolloutBatch struct {
// Replicas is the number of pods to upgrade in this batch
// it can be an absolute number (ex: 5) or a percentage of total pods
// we will ignore the percentage of the last batch to just fill the gap
// +optional
// it is mutually exclusive with the PodList field
Replicas intstr.IntOrString `json:"replicas,omitempty"`
// The list of Pods to get upgraded
// +optional
// it is mutually exclusive with the Replicas field
PodList []string `json:"podList,omitempty"`
// MaxUnavailable is the max allowed number of pods that is unavailable
// during the upgrade. We will mark the batch as ready as long as the number of
// unavailable pods is less than or equal to this number.
// default = 0
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
// The wait time, in seconds, between instances upgrades, default = 0
// +optional
InstanceInterval *int32 `json:"instanceInterval,omitempty"`
// RolloutWebhooks provides a way for the batch rollout to interact with an external process
// +optional
BatchRolloutWebhooks []RolloutWebhook `json:"batchRolloutWebhooks,omitempty"`
// CanaryMetric provides a way for the batch rollout process to automatically check certain metrics
// before moving to the next batch
// +optional
CanaryMetric []CanaryMetric `json:"canaryMetric,omitempty"`
}
// RolloutWebhook holds the reference to external checks used for canary analysis
type RolloutWebhook struct {
// Type of this webhook
Type HookType `json:"type"`
// Name of this webhook
Name string `json:"name"`
// URL address of this webhook
URL string `json:"url"`
// Method the HTTP call method, default is POST
Method string `json:"method,omitempty"`
// ExpectedStatus contains all the expected http status code that we will accept as success
ExpectedStatus []int `json:"expectedStatus,omitempty"`
// Metadata (key-value pairs) for this webhook
// +optional
Metadata *map[string]string `json:"metadata,omitempty"`
}
// RolloutWebhookPayload holds the info and metadata sent to webhooks
type RolloutWebhookPayload struct {
// Name of the upgrading resource
Name string `json:"name"`
// Namespace of the upgrading resource
Namespace string `json:"namespace"`
// Phase of the rollout
Phase string `json:"phase"`
// Metadata (key-value pairs) are the extra data send to this webhook
Metadata map[string]string `json:"metadata,omitempty"`
}
// CanaryMetric holds the reference to metrics used for canary analysis
type CanaryMetric struct {
// Name of the metric
Name string `json:"name"`
// Interval represents the window size
Interval string `json:"interval,omitempty"`
// Range value accepted for this metric
// +optional
MetricsRange *MetricsExpectedRange `json:"metricsRange,omitempty"`
// TemplateRef references a metric template object
// +optional
TemplateRef *corev1.ObjectReference `json:"templateRef,omitempty"`
}
// MetricsExpectedRange defines the range used for metrics validation
type MetricsExpectedRange struct {
// Minimum value
// +optional
Min *intstr.IntOrString `json:"min,omitempty"`
// Maximum value
// +optional
Max *intstr.IntOrString `json:"max,omitempty"`
}
// RolloutStatus defines the observed state of a rollout plan
type RolloutStatus struct {
// Conditions represents the latest available observations of the rollout's current state.
condition.ConditionedStatus `json:",inline"`
// RolloutOriginalSize is the original size of the target resources. This is determined once the initial spec verification
// completes and does not change until the rollout is restarted
RolloutOriginalSize int32 `json:"rolloutOriginalSize,omitempty"`
// RolloutTargetSize is the size of the target resources. This is determined once the initial spec verification
// completes and does not change until the rollout is restarted
RolloutTargetSize int32 `json:"rolloutTargetSize,omitempty"`
// NewPodTemplateIdentifier is a string that uniquely represents the new pod template
// each workload type could use different ways to identify that so we cannot compare between resources
NewPodTemplateIdentifier string `json:"targetGeneration,omitempty"`
// LastAppliedPodTemplateIdentifier is a string that uniquely represents the last pod template
// each workload type could use different ways to identify that so we cannot compare between resources
// We update this field only after a successful rollout
LastAppliedPodTemplateIdentifier string `json:"lastAppliedPodTemplateIdentifier,omitempty"`
// RollingState is the Rollout State
RollingState RollingState `json:"rollingState"`
// BatchRollingState only meaningful when the Status is rolling
// +optional
BatchRollingState BatchRollingState `json:"batchRollingState"`
// The current batch the rollout is working on/blocked
// it starts from 0
CurrentBatch int32 `json:"currentBatch"`
// UpgradedReplicas is the number of Pods upgraded by the rollout controller
UpgradedReplicas int32 `json:"upgradedReplicas"`
// UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition.
UpgradedReadyReplicas int32 `json:"upgradedReadyReplicas"`
}
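To make the shape of the deleted plan types concrete, a minimal sketch that builds a three-batch RolloutPlan, assuming the pre-removal standard.oam.dev/v1alpha1 import path; the batch percentages and partition value are illustrative.

```go
package main

import (
	"k8s.io/apimachinery/pkg/util/intstr"

	"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
)

// buildCanaryPlan returns a three-batch plan that upgrades 20%, then 30%, then
// the remaining pods. BatchPartition is left at 0 so only the first batch
// proceeds until an operator raises it.
func buildCanaryPlan() v1alpha1.RolloutPlan {
	numBatches := int32(3)
	partition := int32(0)
	return v1alpha1.RolloutPlan{
		RolloutStrategy: v1alpha1.IncreaseFirstRolloutStrategyType,
		NumBatches:      &numBatches,
		BatchPartition:  &partition,
		RolloutBatches: []v1alpha1.RolloutBatch{
			{Replicas: intstr.FromString("20%")},
			{Replicas: intstr.FromString("30%")},
			// Leave the last batch's replicas empty so it fills the remaining gap,
			// as the field comment above recommends.
			{},
		},
	}
}
```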


@@ -1,430 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"time"
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
)
// RolloutEvent is used to describe the events during rollout
type RolloutEvent string
const (
// RollingFailedEvent indicates that we encountered an unexpected error during upgrading and can't be retried
RollingFailedEvent RolloutEvent = "RollingFailedEvent"
// RollingRetriableFailureEvent indicates that we encountered an unexpected but retriable error
RollingRetriableFailureEvent RolloutEvent = "RollingRetriableFailureEvent"
// AppLocatedEvent indicates that apps are located successfully
AppLocatedEvent RolloutEvent = "AppLocatedEvent"
// RollingModifiedEvent indicates that the rolling target or source has changed
RollingModifiedEvent RolloutEvent = "RollingModifiedEvent"
// RollingDeletedEvent indicates that the rolling is being deleted
RollingDeletedEvent RolloutEvent = "RollingDeletedEvent"
// RollingSpecVerifiedEvent indicates that we have successfully verified the rollout spec
RollingSpecVerifiedEvent RolloutEvent = "RollingSpecVerifiedEvent"
// RollingInitializedEvent indicates that we have finished initializing all the workload resources
RollingInitializedEvent RolloutEvent = "RollingInitializedEvent"
// AllBatchFinishedEvent indicates that all batches are upgraded
AllBatchFinishedEvent RolloutEvent = "AllBatchFinishedEvent"
// RollingFinalizedEvent indicates that we have finalized the rollout, which includes but is not
// limited to the resource garbage collection
RollingFinalizedEvent RolloutEvent = "AllBatchFinishedEvent"
// InitializedOneBatchEvent indicates that we have successfully initialized one batch
InitializedOneBatchEvent RolloutEvent = "InitializedOneBatchEvent"
// FinishedOneBatchEvent indicates that we have successfully rolled out one batch
FinishedOneBatchEvent RolloutEvent = "FinishedOneBatchEvent"
// RolloutOneBatchEvent indicates that we have rolled out one batch
RolloutOneBatchEvent RolloutEvent = "RolloutOneBatchEvent"
// OneBatchAvailableEvent indicates that the batch resource is considered available
// this event comes after we have examined the pod readiness check and traffic shifting if needed
OneBatchAvailableEvent RolloutEvent = "OneBatchAvailable"
// BatchRolloutApprovedEvent indicates that we got the approval manually
BatchRolloutApprovedEvent RolloutEvent = "BatchRolloutApprovedEvent"
// BatchRolloutFailedEvent indicates that the batch did not get the required manual or automatic approval
BatchRolloutFailedEvent RolloutEvent = "BatchRolloutFailedEvent"
)
// These are valid conditions of the rollout.
const (
// RolloutSpecVerifying indicates that the rollout just started with verification
RolloutSpecVerifying condition.ConditionType = "RolloutSpecVerifying"
// RolloutInitializing means we start to initialize the cluster
RolloutInitializing condition.ConditionType = "RolloutInitializing"
// RolloutInProgress means we are upgrading resources.
RolloutInProgress condition.ConditionType = "RolloutInProgress"
// RolloutFinalizing means the rollout is finalizing
RolloutFinalizing condition.ConditionType = "RolloutFinalizing"
// RolloutFailing means the rollout is failing
RolloutFailing condition.ConditionType = "RolloutFailing"
// RolloutAbandoning means that the rollout is being abandoned.
RolloutAbandoning condition.ConditionType = "RolloutAbandoning"
// RolloutDeleting means that the rollout is being deleted.
RolloutDeleting condition.ConditionType = "RolloutDeleting"
// RolloutFailed means that the rollout failed.
RolloutFailed condition.ConditionType = "RolloutFailed"
// RolloutSucceed means that the rollout is done.
RolloutSucceed condition.ConditionType = "RolloutSucceed"
// BatchInitializing
BatchInitializing condition.ConditionType = "BatchInitializing"
// BatchPaused
BatchPaused condition.ConditionType = "BatchPaused"
// BatchVerifying
BatchVerifying condition.ConditionType = "BatchVerifying"
// BatchRolloutFailed
BatchRolloutFailed condition.ConditionType = "BatchRolloutFailed"
// BatchFinalizing
BatchFinalizing condition.ConditionType = "BatchFinalizing"
// BatchReady
BatchReady condition.ConditionType = "BatchReady"
)
// NewPositiveCondition creates a positive condition type
func NewPositiveCondition(condType condition.ConditionType) condition.Condition {
return condition.Condition{
Type: condType,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.NewTime(time.Now()),
}
}
// NewNegativeCondition creates a false condition type
func NewNegativeCondition(condType condition.ConditionType, message string) condition.Condition {
return condition.Condition{
Type: condType,
Status: v1.ConditionFalse,
LastTransitionTime: metav1.NewTime(time.Now()),
Message: message,
}
}
const invalidRollingStateTransition = "the rollout state transition from `%s` state with `%s` is invalid"
const invalidBatchRollingStateTransition = "the batch rolling state transition from `%s` state with `%s` is invalid"
func (r *RolloutStatus) getRolloutConditionType() condition.ConditionType {
// figure out which condition type should we put in the condition depends on its state
switch r.RollingState {
case VerifyingSpecState:
return RolloutSpecVerifying
case InitializingState:
return RolloutInitializing
case RollingInBatchesState:
switch r.BatchRollingState {
case BatchInitializingState:
return BatchInitializing
case BatchVerifyingState:
return BatchVerifying
case BatchFinalizingState:
return BatchFinalizing
case BatchRolloutFailedState:
return BatchRolloutFailed
case BatchReadyState:
return BatchReady
default:
return RolloutInProgress
}
case FinalisingState:
return RolloutFinalizing
case RolloutFailingState:
return RolloutFailing
case RolloutAbandoningState:
return RolloutAbandoning
case RolloutDeletingState:
return RolloutDeleting
case RolloutSucceedState:
return RolloutSucceed
default:
return RolloutFailed
}
}
// RolloutRetry is a special state transition since we need an error message
func (r *RolloutStatus) RolloutRetry(reason string) {
// we can still retry, no change on the state
r.SetConditions(NewNegativeCondition(r.getRolloutConditionType(), reason))
}
// RolloutFailed is a special state transition since we need an error message
func (r *RolloutStatus) RolloutFailed(reason string) {
// set the condition first which depends on the state
r.SetConditions(NewNegativeCondition(r.getRolloutConditionType(), reason))
r.RollingState = RolloutFailedState
}
// RolloutFailing is a special state transition that always moves the rollout state to the failing state
func (r *RolloutStatus) RolloutFailing(reason string) {
// set the condition first which depends on the state
r.SetConditions(NewNegativeCondition(r.getRolloutConditionType(), reason))
r.RollingState = RolloutFailingState
r.BatchRollingState = BatchInitializingState
}
// ResetStatus resets the status of the rollout to start from beginning
func (r *RolloutStatus) ResetStatus() {
r.NewPodTemplateIdentifier = ""
r.RolloutTargetSize = -1
r.LastAppliedPodTemplateIdentifier = ""
r.RollingState = LocatingTargetAppState
r.BatchRollingState = BatchInitializingState
r.CurrentBatch = 0
r.UpgradedReplicas = 0
r.UpgradedReadyReplicas = 0
}
// SetRolloutCondition sets the supplied condition, replacing any existing condition
// of the same type unless they are identical.
func (r *RolloutStatus) SetRolloutCondition(new condition.Condition) {
exists := false
for i, existing := range r.Conditions {
if existing.Type != new.Type {
continue
}
// we want to update the condition when the LTT changes
if existing.Type == new.Type &&
existing.Status == new.Status &&
existing.Reason == new.Reason &&
existing.Message == new.Message &&
existing.LastTransitionTime == new.LastTransitionTime {
exists = true
continue
}
r.Conditions[i] = new
exists = true
}
if !exists {
r.Conditions = append(r.Conditions, new)
}
}
// we can't panic since it will crash the other controllers
func (r *RolloutStatus) illegalStateTransition(err error) {
r.RolloutFailed(err.Error())
}
// StateTransition is the center place to do rollout state transition
// it returns an error if the transition is invalid
// it changes the coming rollout state if it's valid
func (r *RolloutStatus) StateTransition(event RolloutEvent) {
rollingState := r.RollingState
batchRollingState := r.BatchRollingState
defer func() {
klog.InfoS("try to execute a rollout state transition",
"pre rolling state", rollingState,
"pre batch rolling state", batchRollingState,
"post rolling state", r.RollingState,
"post batch rolling state", r.BatchRollingState)
}()
// we have special transition for these types of event since they require additional info
if event == RollingFailedEvent || event == RollingRetriableFailureEvent {
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
return
}
// special handle modified event here
if event == RollingModifiedEvent {
if r.RollingState == RolloutDeletingState {
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
return
}
if r.RollingState == RolloutFailedState || r.RollingState == RolloutSucceedState {
r.ResetStatus()
} else {
r.SetRolloutCondition(NewNegativeCondition(r.getRolloutConditionType(), "Rollout Spec is modified"))
r.RollingState = RolloutAbandoningState
r.BatchRollingState = BatchInitializingState
}
return
}
// special handle deleted event here, it can happen at many states
if event == RollingDeletedEvent {
if r.RollingState == RolloutFailedState || r.RollingState == RolloutSucceedState {
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
return
}
r.SetRolloutCondition(NewNegativeCondition(r.getRolloutConditionType(), "Rollout is being deleted"))
r.RollingState = RolloutDeletingState
r.BatchRollingState = BatchInitializingState
return
}
// special handle appLocatedEvent event here, it only applies to one state but it's legal to happen at other states
if event == AppLocatedEvent {
if r.RollingState == LocatingTargetAppState {
r.RollingState = VerifyingSpecState
}
return
}
switch rollingState {
case VerifyingSpecState:
if event == RollingSpecVerifiedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.RollingState = InitializingState
return
}
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case InitializingState:
if event == RollingInitializedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.RollingState = RollingInBatchesState
r.BatchRollingState = BatchInitializingState
return
}
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case RollingInBatchesState:
r.batchStateTransition(event)
return
case RolloutAbandoningState:
if event == RollingFinalizedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.ResetStatus()
return
}
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case RolloutDeletingState:
if event == RollingFinalizedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.RollingState = RolloutFailedState
return
}
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case FinalisingState:
if event == RollingFinalizedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.RollingState = RolloutSucceedState
return
}
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case RolloutFailingState:
if event == RollingFinalizedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.RollingState = RolloutFailedState
return
}
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case RolloutSucceedState, RolloutFailedState:
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
default:
r.illegalStateTransition(fmt.Errorf("invalid rolling state %s before transition", rollingState))
}
}
// batchStateTransition handles the state transition when the rollout is in action
func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {
batchRollingState := r.BatchRollingState
if event == BatchRolloutFailedEvent {
r.BatchRollingState = BatchRolloutFailedState
r.RollingState = RolloutFailedState
r.SetConditions(NewNegativeCondition(r.getRolloutConditionType(), "failed"))
return
}
switch batchRollingState {
case BatchInitializingState:
if event == InitializedOneBatchEvent {
r.BatchRollingState = BatchInRollingState
return
}
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
case BatchInRollingState:
if event == RolloutOneBatchEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.BatchRollingState = BatchVerifyingState
return
}
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
case BatchVerifyingState:
if event == OneBatchAvailableEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.BatchRollingState = BatchFinalizingState
return
}
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
case BatchFinalizingState:
if event == FinishedOneBatchEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.BatchRollingState = BatchReadyState
return
}
if event == AllBatchFinishedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
// transition out of the batch loop
r.BatchRollingState = BatchReadyState
r.RollingState = FinalisingState
return
}
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
case BatchReadyState:
if event == BatchRolloutApprovedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.BatchRollingState = BatchInitializingState
r.CurrentBatch++
return
}
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
default:
r.illegalStateTransition(fmt.Errorf("invalid batch rolling state %s", batchRollingState))
}
}
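Since this state machine is the heart of the legacy rollout controller being removed, here is a minimal sketch of how RolloutStatus.StateTransition was driven through the happy path (pre-removal package path assumed); it prints the intermediate states for illustration.

```go
package main

import (
	"fmt"

	"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
)

// walkHappyPath drives a fresh RolloutStatus through the events defined above,
// from locating the target app all the way to rolloutSucceed.
func walkHappyPath() {
	status := &v1alpha1.RolloutStatus{RollingState: v1alpha1.LocatingTargetAppState}
	events := []v1alpha1.RolloutEvent{
		v1alpha1.AppLocatedEvent,          // locatingTargetApp -> verifyingSpec
		v1alpha1.RollingSpecVerifiedEvent, // verifyingSpec -> initializing
		v1alpha1.RollingInitializedEvent,  // initializing -> rollingInBatches
		v1alpha1.InitializedOneBatchEvent, // batchInitializing -> batchInRolling
		v1alpha1.RolloutOneBatchEvent,     // batchInRolling -> batchVerifying
		v1alpha1.OneBatchAvailableEvent,   // batchVerifying -> batchFinalizing
		v1alpha1.AllBatchFinishedEvent,    // batchFinalizing -> finalising
		v1alpha1.RollingFinalizedEvent,    // finalising -> rolloutSucceed
	}
	for _, event := range events {
		status.StateTransition(event)
		fmt.Println(event, "->", status.RollingState, status.BatchRollingState)
	}
}
```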


@@ -1,77 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Rollout is the Schema for the Rollout API
// +kubebuilder:object:root=true
// +kubebuilder:resource:categories={oam},shortName=rollout
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="TARGET",type=string,JSONPath=`.status.rolloutTargetSize`
// +kubebuilder:printcolumn:name="UPGRADED",type=string,JSONPath=`.status.upgradedReplicas`
// +kubebuilder:printcolumn:name="READY",type=string,JSONPath=`.status.upgradedReadyReplicas`
// +kubebuilder:printcolumn:name="BATCH-STATE",type=string,JSONPath=`.status.batchRollingState`
// +kubebuilder:printcolumn:name="ROLLING-STATE",type=string,JSONPath=`.status.rollingState`
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type Rollout struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec RolloutSpec `json:"spec,omitempty"`
Status CompRolloutStatus `json:"status,omitempty"`
}
// RolloutSpec defines how to describe an update between different compRevision
type RolloutSpec struct {
// TargetRevisionName contains the name of the componentRevisionName that we need to upgrade to.
TargetRevisionName string `json:"targetRevisionName"`
// SourceRevisionName contains the name of the componentRevisionName that we need to upgrade from.
// it can be empty only when it's the first time to deploy the application
SourceRevisionName string `json:"sourceRevisionName,omitempty"`
// ComponentName specify the component name
ComponentName string `json:"componentName"`
// RolloutPlan is the details on how to rollout the resources
RolloutPlan RolloutPlan `json:"rolloutPlan"`
}
// CompRolloutStatus defines the observed state of rollout
type CompRolloutStatus struct {
RolloutStatus `json:",inline"`
// LastUpgradedTargetRevision contains the name of the componentRevisionName that we upgraded to
// We will restart the rollout if this is not the same as the spec
LastUpgradedTargetRevision string `json:"lastTargetRevision"`
// LastSourceRevision contains the name of the componentRevisionName that we need to upgrade from.
// We will restart the rollout if this is not the same as the spec
LastSourceRevision string `json:"LastSourceRevision,omitempty"`
}
// RolloutList contains a list of Rollout
// +kubebuilder:object:root=true
type RolloutList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Rollout `json:"items"`
}
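Finally, a minimal sketch of assembling the legacy Rollout object whose schema is deleted above; the component and revision arguments are placeholders rather than values from this commit.

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
)

// newRollout upgrades a component from one componentRevision to another
// using a simple two-batch plan.
func newRollout(component, fromRevision, toRevision string) *v1alpha1.Rollout {
	numBatches := int32(2)
	return &v1alpha1.Rollout{
		ObjectMeta: metav1.ObjectMeta{Name: component + "-rollout", Namespace: "default"},
		Spec: v1alpha1.RolloutSpec{
			ComponentName:      component,
			SourceRevisionName: fromRevision,
			TargetRevisionName: toRevision,
			RolloutPlan:        v1alpha1.RolloutPlan{NumBatches: &numBatches},
		},
	}
}
```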


@@ -1,334 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryMetric) DeepCopyInto(out *CanaryMetric) {
*out = *in
if in.MetricsRange != nil {
in, out := &in.MetricsRange, &out.MetricsRange
*out = new(MetricsExpectedRange)
(*in).DeepCopyInto(*out)
}
if in.TemplateRef != nil {
in, out := &in.TemplateRef, &out.TemplateRef
*out = new(v1.ObjectReference)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryMetric.
func (in *CanaryMetric) DeepCopy() *CanaryMetric {
if in == nil {
return nil
}
out := new(CanaryMetric)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CompRolloutStatus) DeepCopyInto(out *CompRolloutStatus) {
*out = *in
in.RolloutStatus.DeepCopyInto(&out.RolloutStatus)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompRolloutStatus.
func (in *CompRolloutStatus) DeepCopy() *CompRolloutStatus {
if in == nil {
return nil
}
out := new(CompRolloutStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsExpectedRange) DeepCopyInto(out *MetricsExpectedRange) {
*out = *in
if in.Min != nil {
in, out := &in.Min, &out.Min
*out = new(intstr.IntOrString)
**out = **in
}
if in.Max != nil {
in, out := &in.Max, &out.Max
*out = new(intstr.IntOrString)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsExpectedRange.
func (in *MetricsExpectedRange) DeepCopy() *MetricsExpectedRange {
if in == nil {
return nil
}
out := new(MetricsExpectedRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rollout) DeepCopyInto(out *Rollout) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rollout.
func (in *Rollout) DeepCopy() *Rollout {
if in == nil {
return nil
}
out := new(Rollout)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Rollout) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutBatch) DeepCopyInto(out *RolloutBatch) {
*out = *in
out.Replicas = in.Replicas
if in.PodList != nil {
in, out := &in.PodList, &out.PodList
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.InstanceInterval != nil {
in, out := &in.InstanceInterval, &out.InstanceInterval
*out = new(int32)
**out = **in
}
if in.BatchRolloutWebhooks != nil {
in, out := &in.BatchRolloutWebhooks, &out.BatchRolloutWebhooks
*out = make([]RolloutWebhook, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CanaryMetric != nil {
in, out := &in.CanaryMetric, &out.CanaryMetric
*out = make([]CanaryMetric, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutBatch.
func (in *RolloutBatch) DeepCopy() *RolloutBatch {
if in == nil {
return nil
}
out := new(RolloutBatch)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutList) DeepCopyInto(out *RolloutList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Rollout, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutList.
func (in *RolloutList) DeepCopy() *RolloutList {
if in == nil {
return nil
}
out := new(RolloutList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RolloutList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutPlan) DeepCopyInto(out *RolloutPlan) {
*out = *in
if in.TargetSize != nil {
in, out := &in.TargetSize, &out.TargetSize
*out = new(int32)
**out = **in
}
if in.NumBatches != nil {
in, out := &in.NumBatches, &out.NumBatches
*out = new(int32)
**out = **in
}
if in.RolloutBatches != nil {
in, out := &in.RolloutBatches, &out.RolloutBatches
*out = make([]RolloutBatch, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.BatchPartition != nil {
in, out := &in.BatchPartition, &out.BatchPartition
*out = new(int32)
**out = **in
}
if in.RolloutWebhooks != nil {
in, out := &in.RolloutWebhooks, &out.RolloutWebhooks
*out = make([]RolloutWebhook, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CanaryMetric != nil {
in, out := &in.CanaryMetric, &out.CanaryMetric
*out = make([]CanaryMetric, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutPlan.
func (in *RolloutPlan) DeepCopy() *RolloutPlan {
if in == nil {
return nil
}
out := new(RolloutPlan)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutSpec) DeepCopyInto(out *RolloutSpec) {
*out = *in
in.RolloutPlan.DeepCopyInto(&out.RolloutPlan)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutSpec.
func (in *RolloutSpec) DeepCopy() *RolloutSpec {
if in == nil {
return nil
}
out := new(RolloutSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutStatus) DeepCopyInto(out *RolloutStatus) {
*out = *in
in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStatus.
func (in *RolloutStatus) DeepCopy() *RolloutStatus {
if in == nil {
return nil
}
out := new(RolloutStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutWebhook) DeepCopyInto(out *RolloutWebhook) {
*out = *in
if in.ExpectedStatus != nil {
in, out := &in.ExpectedStatus, &out.ExpectedStatus
*out = make([]int, len(*in))
copy(*out, *in)
}
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutWebhook.
func (in *RolloutWebhook) DeepCopy() *RolloutWebhook {
if in == nil {
return nil
}
out := new(RolloutWebhook)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutWebhookPayload) DeepCopyInto(out *RolloutWebhookPayload) {
*out = *in
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutWebhookPayload.
func (in *RolloutWebhookPayload) DeepCopy() *RolloutWebhookPayload {
if in == nil {
return nil
}
out := new(RolloutWebhookPayload)
in.DeepCopyInto(out)
return out
}
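All of these generated helpers follow one pattern: value fields are copied by plain struct assignment, while pointer, map, and slice fields are re-allocated so the copy shares no memory with the original. A minimal hand-written sketch of the same pattern, using a hypothetical Example type rather than the real rollout API:

package example

import "k8s.io/apimachinery/pkg/util/intstr"

// Example is a hypothetical type with the same field shapes as the rollout API above.
type Example struct {
	MaxUnavailable *intstr.IntOrString // pointer field
	Metadata       map[string]string   // map field
	Batches        []string            // slice field
}

// DeepCopyInto copies the receiver into out; in must be non-nil.
func (in *Example) DeepCopyInto(out *Example) {
	*out = *in // value fields copied; pointers/maps/slices still aliased
	if in.MaxUnavailable != nil {
		out.MaxUnavailable = new(intstr.IntOrString)
		*out.MaxUnavailable = *in.MaxUnavailable
	}
	if in.Metadata != nil {
		out.Metadata = make(map[string]string, len(in.Metadata))
		for k, v := range in.Metadata {
			out.Metadata[k] = v
		}
	}
	if in.Batches != nil {
		out.Batches = make([]string, len(in.Batches))
		copy(out.Batches, in.Batches)
	}
}

// DeepCopy allocates a new Example and copies the receiver into it.
func (in *Example) DeepCopy() *Example {
	if in == nil {
		return nil
	}
	out := new(Example)
	in.DeepCopyInto(out)
	return out
}

After DeepCopy, mutating the original's Metadata map or Batches slice no longer affects the copy, which is what callers of the generated helpers rely on.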

View File

@ -64,8 +64,6 @@ const (
TypeWorkload CapType = "workload"
// TypeTrait represents OAM Trait
TypeTrait CapType = "trait"
// TypeScope represent OAM Scope
TypeScope CapType = "scope"
// TypeWorkflowStep represent OAM Workflow
TypeWorkflowStep CapType = "workflowstep"
// TypePolicy represent OAM Policy

View File

@ -17,22 +17,19 @@ limitations under the License.
package types
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// ComponentManifest contains resources rendered from an application component.
type ComponentManifest struct {
Name string
Namespace string
RevisionName string
RevisionHash string
ExternalRevision string
Name string
Namespace string
RevisionName string
RevisionHash string
// StandardWorkload contains K8s resource generated from "output" block of ComponentDefinition
StandardWorkload *unstructured.Unstructured
// Traits contains both resources generated from "outputs" block of ComponentDefinition and resources generated from TraitDefinition
Traits []*unstructured.Unstructured
Scopes []*corev1.ObjectReference
// PackagedWorkloadResources contain all the workload related resources. It could be a Helm
// Release, Git Repo or anything that can package and run a workload.
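With scopes and the external revision gone, ComponentManifest boils down to the rendered workload plus its traits and revision bookkeeping. A hedged sketch of populating the remaining fields, assuming the types package above lives at github.com/oam-dev/kubevela/apis/types (the literal values are placeholders):

package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/oam-dev/kubevela/apis/types" // assumed location of the ComponentManifest type above
)

// buildManifest is a hypothetical helper showing how the remaining fields fit together.
func buildManifest(workload *unstructured.Unstructured, traits []*unstructured.Unstructured) *types.ComponentManifest {
	return &types.ComponentManifest{
		Name:             "frontend",    // placeholder component name
		Namespace:        "default",
		RevisionName:     "frontend-v1", // placeholder revision
		RevisionHash:     "abc123",
		StandardWorkload: workload,
		Traits:           traits,
	}
}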

View File

@ -23,19 +23,14 @@ const (
ReasonPolicyGenerated = "PolicyGenerated"
ReasonRevisoned = "Revisioned"
ReasonApplied = "Applied"
ReasonHealthCheck = "HealthChecked"
ReasonDeployed = "Deployed"
ReasonRollout = "Rollout"
ReasonFailedParse = "FailedParse"
ReasonFailedRender = "FailedRender"
ReasonFailedRevision = "FailedRevision"
ReasonFailedWorkflow = "FailedWorkflow"
ReasonFailedApply = "FailedApply"
ReasonFailedHealthCheck = "FailedHealthCheck"
ReasonFailedStateKeep = "FailedStateKeep"
ReasonFailedGC = "FailedGC"
ReasonFailedRollout = "FailedRollout"
ReasonFailedParse = "FailedParse"
ReasonFailedRevision = "FailedRevision"
ReasonFailedWorkflow = "FailedWorkflow"
ReasonFailedApply = "FailedApply"
ReasonFailedStateKeep = "FailedStateKeep"
ReasonFailedGC = "FailedGC"
)
// event message for Application
@ -44,16 +39,6 @@ const (
MessageRendered = "Rendered successfully"
MessagePolicyGenerated = "Policy generated successfully"
MessageRevisioned = "Revisioned successfully"
MessageApplied = "Applied successfully"
MessageWorkflowFinished = "Workflow finished"
MessageHealthCheck = "Health checked healthy"
MessageDeployed = "Deployed successfully"
MessageRollout = "Rollout successfully"
MessageFailedParse = "fail to parse application, err: %v"
MessageFailedRender = "fail to render application, err: %v"
MessageFailedRevision = "fail to handle application revision, err: %v"
MessageFailedApply = "fail to apply component, err: %v"
MessageFailedHealthCheck = "fail to health check, err: %v"
MessageFailedGC = "fail to garbage collection, err: %v"
)
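The reason and message constants are used in pairs when the application controller emits Kubernetes events; the %v placeholders in the failure messages take the concrete error. A hedged sketch of that pairing, assuming the constants live in github.com/oam-dev/kubevela/apis/types and a standard client-go event recorder (the helper itself is hypothetical):

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	velatypes "github.com/oam-dev/kubevela/apis/types" // assumed location of the constants above
)

// reportApply pairs ReasonApplied with MessageApplied on success and
// ReasonFailedApply with the %v-formatted failure message on error.
func reportApply(r record.EventRecorder, app *v1beta1.Application, err error) {
	if err == nil {
		r.Event(app, corev1.EventTypeNormal, velatypes.ReasonApplied, velatypes.MessageApplied)
		return
	}
	r.Eventf(app, corev1.EventTypeWarning, velatypes.ReasonFailedApply, velatypes.MessageFailedApply, err)
}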

View File

@ -1,31 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
// RollingStatus represents the rollout phases
type RollingStatus string
const (
// RollingTemplating means that the AC is rolling and needs templating
RollingTemplating RollingStatus = "RollingTemplating"
// RollingTemplated means that the AC is rolling and has already been templated
RollingTemplated RollingStatus = "RollingTemplated"
// RollingCompleted means that the AC is the new active revision of the application
RollingCompleted RollingStatus = "RollingCompleted"
// InactiveAfterRollingCompleted means that the AC is the inactive revision after the rolling is finished
InactiveAfterRollingCompleted RollingStatus = "InactiveAfterRollingCompleted"
)
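The four legacy phases formed a simple linear lifecycle: templating, templated, completed, then inactive once a newer revision took over. A small illustrative sketch of branching on them, re-declared locally so it is self-contained (not code from the controller):

package example

// RollingStatus mirrors the removed legacy type for illustration only.
type RollingStatus string

const (
	RollingTemplating             RollingStatus = "RollingTemplating"
	RollingTemplated              RollingStatus = "RollingTemplated"
	RollingCompleted              RollingStatus = "RollingCompleted"
	InactiveAfterRollingCompleted RollingStatus = "InactiveAfterRollingCompleted"
)

// needsTemplating reports whether the revision still has to be templated.
func needsTemplating(s RollingStatus) bool {
	return s == RollingTemplating
}

// isFinished reports whether the rolling process has reached a terminal phase.
func isFinished(s RollingStatus) bool {
	return s == RollingCompleted || s == InactiveAfterRollingCompleted
}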

View File

@ -64,8 +64,6 @@ const (
LabelDefinitionDeprecated = "custom.definition.oam.dev/deprecated"
// LabelDefinitionHidden is the label which describes whether the capability is hidden by the UI
LabelDefinitionHidden = "custom.definition.oam.dev/ui-hidden"
// LabelDefinitionScope is the label which describes the capability's scope
LabelDefinitionScope = "custom.definition.oam.dev/scope"
// LabelNodeRoleGateway gateway role of node
LabelNodeRoleGateway = "node-role.kubernetes.io/gateway"
// LabelNodeRoleWorker worker role of node
@ -157,8 +155,6 @@ const LabelArg = "label"
// DefaultFilterAnnots are annotations that won't pass to workload or trait
var DefaultFilterAnnots = []string{
oam.AnnotationAppRollout,
oam.AnnotationRollingComponent,
oam.AnnotationInplaceUpgrade,
oam.AnnotationFilterLabelKeys,
oam.AnnotationFilterAnnotationKeys,
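DefaultFilterAnnots now keeps only the filter-key annotations; the rollout-related entries are gone. The list is consumed by stripping those keys from the annotations handed down to workloads and traits, roughly as in this sketch (the helper name is hypothetical):

package example

// filterAnnotations returns a copy of annots with the filtered keys removed,
// so the originals on the Application object stay untouched.
func filterAnnotations(annots map[string]string, filtered []string) map[string]string {
	out := make(map[string]string, len(annots))
	for k, v := range annots {
		out[k] = v
	}
	for _, key := range filtered {
		delete(out, key)
	}
	return out
}

Called with DefaultFilterAnnots, this would drop the listed annotations before the rendered resources are applied.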

View File

@ -38,16 +38,13 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
### KubeVela core parameters
| Name | Description | Value |
| ----------------------------- | --------------------------------------------------------------------------------------------- | ------ |
| `systemDefinitionNamespace` | System definition namespace, if unspecified, will use built-in variable `.Release.Namespace`. | `nil` |
| `applicationRevisionLimit` | Application revision limit | `2` |
| `definitionRevisionLimit` | Definition revision limit | `2` |
| `concurrentReconciles` | concurrentReconciles is the concurrent reconcile number of the controller | `4` |
| `controllerArgs.reSyncPeriod` | The period for resync the applications | `5m` |
| `OAMSpecVer`                  | OAMSpecVer is the OAM spec version the controller wants to set up                              | `v0.3` |
| `disableCaps` | Disable capability | `all` |
| `dependCheckWait` | dependCheckWait is the time to wait for ApplicationConfiguration's dependent-resource ready | `30s` |
| Name | Description | Value |
| ----------------------------- | --------------------------------------------------------------------------------------------- | ----- |
| `systemDefinitionNamespace` | System definition namespace, if unspecified, will use built-in variable `.Release.Namespace`. | `nil` |
| `applicationRevisionLimit` | Application revision limit | `2` |
| `definitionRevisionLimit` | Definition revision limit | `2` |
| `concurrentReconciles` | concurrentReconciles is the concurrent reconcile number of the controller | `4` |
| `controllerArgs.reSyncPeriod` | The period for resync the applications | `5m` |
### KubeVela workflow parameters
@ -86,7 +83,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
| `optimize.enableInMemoryWorkflowContext` | Optimize workflow by use in-memory context. | `false` |
| `optimize.disableResourceApplyDoubleCheck` | Optimize workflow by ignoring resource double check after apply. | `false` |
| `optimize.enableResourceTrackerDeleteOnlyTrigger` | Optimize resourcetracker by only trigger reconcile when resourcetracker is deleted. | `true` |
| `featureGates.enableLegacyComponentRevision` | if disabled, only component with rollout trait will create component revisions | `false` |
| `featureGates.gzipResourceTracker` | compress ResourceTracker using gzip (good) before being stored. This reduces network throughput when dealing with huge ResourceTrackers. | `false` |
| `featureGates.zstdResourceTracker` | compress ResourceTracker using zstd (fast and good) before being stored. This reduces network throughput when dealing with huge ResourceTrackers. Note that zstd will be prioritized if you enable other compression options. | `true` |
| `featureGates.applyOnce` | if enabled, the apply-once feature will be applied to all applications, no state-keep and no resource data storage in ResourceTracker | `false` |

View File

@ -1581,104 +1581,6 @@ spec:
x-kubernetes-preserve-unknown-fields: true
type: array
x-kubernetes-preserve-unknown-fields: true
scopeDefinitions:
additionalProperties:
description: A ScopeDefinition registers a kind of Kubernetes custom
resource as a valid OAM scope kind by referencing its CustomResourceDefinition.
The CRD is used to validate the schema of the scope when it is
embedded in an OAM ApplicationConfiguration.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
representation of an object. Servers should convert recognized
schemas to the latest internal value, and may reject unrecognized
values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource
this object represents. Servers may infer this from the endpoint
the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
properties:
annotations:
additionalProperties:
type: string
type: object
finalizers:
items:
type: string
type: array
labels:
additionalProperties:
type: string
type: object
name:
type: string
namespace:
type: string
type: object
spec:
description: A ScopeDefinitionSpec defines the desired state
of a ScopeDefinition.
properties:
allowComponentOverlap:
description: AllowComponentOverlap specifies whether an
OAM component may exist in multiple instances of this
kind of scope.
type: boolean
definitionRef:
description: Reference to the CustomResourceDefinition that
defines this scope kind.
properties:
name:
description: Name of the referenced CustomResourceDefinition.
type: string
version:
description: Version indicates which version should be
used if the CRD has multiple versions; by default it will
use the first one if not specified
type: string
required:
- name
type: object
extension:
description: Extension is used for extension needs by OAM
platform builders
type: object
x-kubernetes-preserve-unknown-fields: true
workloadRefsPath:
description: WorkloadRefsPath indicates if/where a scope
accepts workloadRef objects
type: string
required:
- allowComponentOverlap
- definitionRef
type: object
type: object
description: ScopeDefinitions records the snapshot of the scopeDefinitions
related with the created/modified Application
type: object
scopeGVK:
additionalProperties:
description: GroupVersionKind unambiguously identifies a kind. It
doesn't anonymously include GroupVersion to avoid automatic coercion. It
doesn't use a GroupVersion to avoid custom marshalling
properties:
group:
type: string
kind:
type: string
version:
type: string
required:
- group
- kind
- version
type: object
description: ScopeGVK records the apiVersion to GVK mapping
type: object
traitDefinitions:
additionalProperties:
description: A TraitDefinition registers a kind of Kubernetes custom

View File

@ -1,83 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.11.4
name: scopedefinitions.core.oam.dev
spec:
group: core.oam.dev
names:
categories:
- oam
kind: ScopeDefinition
listKind: ScopeDefinitionList
plural: scopedefinitions
shortNames:
- scope
singular: scopedefinition
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.definitionRef.name
name: DEFINITION-NAME
type: string
name: v1beta1
schema:
openAPIV3Schema:
description: A ScopeDefinition registers a kind of Kubernetes custom resource
as a valid OAM scope kind by referencing its CustomResourceDefinition. The
CRD is used to validate the schema of the scope when it is embedded in an
OAM ApplicationConfiguration.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: A ScopeDefinitionSpec defines the desired state of a ScopeDefinition.
properties:
allowComponentOverlap:
description: AllowComponentOverlap specifies whether an OAM component
may exist in multiple instances of this kind of scope.
type: boolean
definitionRef:
description: Reference to the CustomResourceDefinition that defines
this scope kind.
properties:
name:
description: Name of the referenced CustomResourceDefinition.
type: string
version:
description: Version indicates which version should be used if
the CRD has multiple versions; by default it will use the first one
if not specified
type: string
required:
- name
type: object
extension:
description: Extension is used for extension needs by OAM platform
builders
type: object
x-kubernetes-preserve-unknown-fields: true
workloadRefsPath:
description: WorkloadRefsPath indicates if/where a scope accepts workloadRef
objects
type: string
required:
- allowComponentOverlap
- definitionRef
type: object
type: object
served: true
storage: true
subresources: {}

View File

@ -1,477 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.11.4
name: rollouts.standard.oam.dev
spec:
group: standard.oam.dev
names:
categories:
- oam
kind: Rollout
listKind: RolloutList
plural: rollouts
shortNames:
- rollout
singular: rollout
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .status.rolloutTargetSize
name: TARGET
type: string
- jsonPath: .status.upgradedReplicas
name: UPGRADED
type: string
- jsonPath: .status.upgradedReadyReplicas
name: READY
type: string
- jsonPath: .status.batchRollingState
name: BATCH-STATE
type: string
- jsonPath: .status.rollingState
name: ROLLING-STATE
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: Rollout is the Schema for the Rollout API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: RolloutSpec defines how to describe an update between different
compRevision
properties:
componentName:
description: ComponentName specify the component name
type: string
rolloutPlan:
description: RolloutPlan is the details on how to rollout the resources
properties:
batchPartition:
description: All pods in the batches up to the batchPartition
(included) will have the target resource specification while
the rest still have the source resource. This is designed for
the operators to manually rollout. Default is the number
of batches, which will rollout all the batches
format: int32
type: integer
canaryMetric:
description: CanaryMetric provides a way for the rollout process
to automatically check certain metrics before completing the process
items:
description: CanaryMetric holds the reference to metrics used
for canary analysis
properties:
interval:
description: Interval represents the window size
type: string
metricsRange:
description: Range value accepted for this metric
properties:
max:
anyOf:
- type: integer
- type: string
description: Maximum value
x-kubernetes-int-or-string: true
min:
anyOf:
- type: integer
- type: string
description: Minimum value
x-kubernetes-int-or-string: true
type: object
name:
description: Name of the metric
type: string
templateRef:
description: TemplateRef references a metric template object
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead
of an entire object, this string should contain a
valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container
within a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container
that triggered the event) or if no container name
is specified "spec.containers[2]" (container with
index 2 in this pod). This syntax is chosen only to
have some well-defined way of referencing a part of
an object. TODO: this design is not final and this
field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this
reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
x-kubernetes-map-type: atomic
required:
- name
type: object
type: array
numBatches:
description: The number of batches, default = 1
format: int32
type: integer
paused:
description: Paused the rollout, default is false
type: boolean
rolloutBatches:
description: The exact distribution among batches. Its size has
to be exactly the same as NumBatches (if set). The total
number cannot exceed the targetSize or the size of the source
resource. We will IGNORE the last batch's replica field if it's
a percentage, since rounding errors can lead to an inaccurate sum. We
highly recommend leaving the last batch's replica field empty
items:
description: RolloutBatch is used to describe how the each batch
rollout should be
properties:
batchRolloutWebhooks:
description: RolloutWebhooks provides a way for the batch
rollout to interact with an external process
items:
description: RolloutWebhook holds the reference to external
checks used for canary analysis
properties:
expectedStatus:
description: ExpectedStatus contains all the expected
HTTP status codes that we will accept as success
items:
type: integer
type: array
metadata:
additionalProperties:
type: string
description: Metadata (key-value pairs) for this webhook
type: object
method:
description: Method is the HTTP call method, default
is POST
type: string
name:
description: Name of this webhook
type: string
type:
description: Type of this webhook
type: string
url:
description: URL address of this webhook
type: string
required:
- name
- type
- url
type: object
type: array
canaryMetric:
description: CanaryMetric provides a way for the batch rollout
process to automatically check certain metrics before
moving to the next batch
items:
description: CanaryMetric holds the reference to metrics
used for canary analysis
properties:
interval:
description: Interval represents the window size
type: string
metricsRange:
description: Range value accepted for this metric
properties:
max:
anyOf:
- type: integer
- type: string
description: Maximum value
x-kubernetes-int-or-string: true
min:
anyOf:
- type: integer
- type: string
description: Minimum value
x-kubernetes-int-or-string: true
type: object
name:
description: Name of the metric
type: string
templateRef:
description: TemplateRef references a metric template
object
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object
instead of an entire object, this string should
contain a valid JSON/Go field access statement,
such as desiredState.manifest.containers[2].
For example, if the object reference is to a
container within a pod, this would take on a
value like: "spec.containers{name}" (where "name"
refers to the name of the container that triggered
the event) or if no container name is specified
"spec.containers[2]" (container with index 2
in this pod). This syntax is chosen only to
have some well-defined way of referencing a
part of an object. TODO: this design is not
final and this field is subject to change in
the future.'
type: string
kind:
description: 'Kind of the referent. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which
this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
x-kubernetes-map-type: atomic
required:
- name
type: object
type: array
instanceInterval:
description: The wait time, in seconds, between instance
upgrades, default = 0
format: int32
type: integer
maxUnavailable:
anyOf:
- type: integer
- type: string
description: MaxUnavailable is the max allowed number of
pods that are unavailable during the upgrade. We will mark
the batch as ready as long as the number of unavailable
pods is less than or equal to this number. default =
0
x-kubernetes-int-or-string: true
podList:
description: The list of Pods to get upgraded; it is mutually
exclusive with the Replicas field
items:
type: string
type: array
replicas:
anyOf:
- type: integer
- type: string
description: 'Replicas is the number of pods to upgrade
in this batch; it can be an absolute number (ex: 5) or
a percentage of total pods. We will ignore the percentage
of the last batch to just fill the gap. It is mutually
exclusive with the PodList field'
x-kubernetes-int-or-string: true
type: object
type: array
rolloutStrategy:
description: RolloutStrategy defines strategies for the rollout
plan. The default is IncreaseFirstRolloutStrategyType
type: string
rolloutWebhooks:
description: RolloutWebhooks provide a way for the rollout to
interact with an external process
items:
description: RolloutWebhook holds the reference to external
checks used for canary analysis
properties:
expectedStatus:
description: ExpectedStatus contains all the expected HTTP
status codes that we will accept as success
items:
type: integer
type: array
metadata:
additionalProperties:
type: string
description: Metadata (key-value pairs) for this webhook
type: object
method:
description: Method is the HTTP call method, default is POST
type: string
name:
description: Name of this webhook
type: string
type:
description: Type of this webhook
type: string
url:
description: URL address of this webhook
type: string
required:
- name
- type
- url
type: object
type: array
targetSize:
description: The size of the target resource. The default is the
same as the size of the source resource.
format: int32
type: integer
type: object
sourceRevisionName:
description: SourceRevisionName contains the name of the componentRevisionName that
we need to upgrade from. It can be empty only when the application
is deployed for the first time
type: string
targetRevisionName:
description: TargetRevisionName contains the name of the componentRevisionName
that we need to upgrade to.
type: string
required:
- componentName
- rolloutPlan
- targetRevisionName
type: object
status:
description: CompRolloutStatus defines the observed state of rollout
properties:
LastSourceRevision:
description: LastSourceRevision contains the name of the componentRevisionName
that we need to upgrade from. We will restart the rollout if this
is not the same as the spec
type: string
batchRollingState:
description: BatchRollingState is only meaningful when the Status
is rolling
type: string
conditions:
description: Conditions of the resource.
items:
description: A Condition that may apply to a resource.
properties:
lastTransitionTime:
description: LastTransitionTime is the last time this condition
transitioned from one status to another.
format: date-time
type: string
message:
description: A Message containing details about this condition's
last transition from one status to another, if any.
type: string
reason:
description: A Reason for this condition's last transition from
one status to another.
type: string
status:
description: Status of this condition; is it currently True,
False, or Unknown?
type: string
type:
description: Type of this condition. At most one of each condition
type may apply to a resource at any point in time.
type: string
required:
- lastTransitionTime
- reason
- status
- type
type: object
type: array
currentBatch:
description: The current batch the rollout is working on/blocked at;
it starts from 0
format: int32
type: integer
lastAppliedPodTemplateIdentifier:
description: lastAppliedPodTemplateIdentifier is a string that uniquely
represents the last pod template. Each workload type could use different
ways to identify that, so we cannot compare between resources. We
update this field only after a successful rollout
type: string
lastTargetRevision:
description: LastUpgradedTargetRevision contains the name of the componentRevisionName
that we upgraded to. We will restart the rollout if this is not the
same as the spec
type: string
rollingState:
description: RollingState is the Rollout State
type: string
rolloutOriginalSize:
description: RolloutOriginalSize is the original size of the target resources.
This is determined after the initial spec verification and does not
change until the rollout is restarted
format: int32
type: integer
rolloutTargetSize:
description: RolloutTargetSize is the size of the target resources.
This is determined after the initial spec verification and does not
change until the rollout is restarted
format: int32
type: integer
targetGeneration:
description: NewPodTemplateIdentifier is a string that uniquely represents
the new pod template. Each workload type could use different ways
to identify that, so we cannot compare between resources
type: string
upgradedReadyReplicas:
description: UpgradedReadyReplicas is the number of Pods upgraded
by the rollout controller that have a Ready Condition.
format: int32
type: integer
upgradedReplicas:
description: UpgradedReplicas is the number of Pods upgraded by the
rollout controller
format: int32
type: integer
required:
- currentBatch
- lastTargetRevision
- rollingState
- upgradedReadyReplicas
- upgradedReplicas
type: object
type: object
served: true
storage: true
subresources:
status: {}
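Taken together, the removed schema describes a fairly small object: a component, a target revision, and a batched plan. A hedged Go sketch of a minimal two-batch Rollout the CRD would have accepted, using the standard.oam.dev/v1alpha1 API this commit retires (Go field names such as ComponentName and TargetRevisionName are inferred from the JSON keys above):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
)

// minimalRollout sketches a two-batch rollout of express-server to revision v2;
// all names are placeholders.
func minimalRollout() *v1alpha1.Rollout {
	numBatches := int32(2)
	return &v1alpha1.Rollout{
		ObjectMeta: metav1.ObjectMeta{Name: "rollout-example", Namespace: "default"},
		Spec: v1alpha1.RolloutSpec{
			ComponentName:      "express-server",
			TargetRevisionName: "express-server-v2",
			RolloutPlan: v1alpha1.RolloutPlan{
				NumBatches: &numBatches,
				RolloutBatches: []v1alpha1.RolloutBatch{
					{Replicas: intstr.FromInt(1)},
					{Replicas: intstr.FromInt(4)},
				},
			},
		},
	}
}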

View File

@ -9,117 +9,6 @@ metadata:
cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kubevela.fullname" .) | quote }}
{{- end }}
webhooks:
{{- if eq .Values.OAMSpecVer "all"}}
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /mutating-core-oam-dev-v1alpha2-applicationconfigurations
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: Fail
{{- end }}
name: mutating.core.oam.dev.v1alpha2.applicationconfigurations
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
apiVersions:
- v1alpha2
operations:
- CREATE
- UPDATE
resources:
- applicationconfigurations
scope: Namespaced
admissionReviewVersions:
- v1beta1
- v1
timeoutSeconds: 5
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /mutating-core-oam-dev-v1alpha2-components
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: Fail
{{- end }}
name: mutating.core.oam-dev.v1alpha2.components
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
apiVersions:
- v1alpha2
operations:
- CREATE
- UPDATE
resources:
- components
scope: Namespaced
admissionReviewVersions:
- v1beta1
- v1
timeoutSeconds: 5
{{- end }}
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /mutating-core-oam-dev-v1beta1-approllout
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: Fail
{{- end }}
name: mutating.core.oam.dev.v1beta1.approllouts
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- approllouts
scope: Namespaced
admissionReviewVersions:
- v1beta1
- v1
timeoutSeconds: 5
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /mutate-standard-oam-dev-v1alpha1-podspecworkload
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: Fail
{{- end }}
name: mcontainerized.kb.io
sideEffects: None
admissionReviewVersions:
- v1beta1
rules:
- apiGroups:
- standard.oam.dev
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- podspecworkloads
- clientConfig:
caBundle: Cg==
service:

View File

@ -9,92 +9,6 @@ metadata:
cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kubevela.fullname" .) | quote }}
{{- end }}
webhooks:
{{- if eq .Values.OAMSpecVer "all"}}
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /validating-core-oam-dev-v1alpha2-applicationconfigurations
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }}
{{- end }}
name: validating.core.oam.dev.v1alpha2.applicationconfigurations
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
apiVersions:
- v1alpha2
operations:
- CREATE
- UPDATE
resources:
- applicationconfigurations
scope: Namespaced
admissionReviewVersions:
- v1beta1
- v1
timeoutSeconds: 5
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /validating-core-oam-dev-v1alpha2-components
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }}
{{- end }}
name: validating.core.oam.dev.v1alpha2.components
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
apiVersions:
- v1alpha2
operations:
- CREATE
- UPDATE
resources:
- components
scope: Namespaced
admissionReviewVersions:
- v1beta1
- v1
timeoutSeconds: 5
{{- end }}
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /validating-core-oam-dev-v1beta1-approllout
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }}
{{- end }}
name: validating.core.oam.dev.v1beta1.approllouts
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- approllouts
scope: Namespaced
admissionReviewVersions:
- v1beta1
- v1
timeoutSeconds: 5
- clientConfig:
caBundle: Cg==
service:
@ -123,28 +37,6 @@ webhooks:
- traitdefinitions
scope: Cluster
timeoutSeconds: 5
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-standard-oam-dev-v1alpha1-podspecworkload
failurePolicy: Fail
name: vcontainerized.kb.io
admissionReviewVersions:
- v1beta1
- v1
sideEffects: None
rules:
- apiGroups:
- standard.oam.dev
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- podspecworkloads
- clientConfig:
caBundle: Cg==
service:
@ -197,31 +89,4 @@ webhooks:
- UPDATE
resources:
- componentdefinitions
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /validating-core-oam-dev-v1beta1-initializers
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: Fail
{{- end }}
name: validating.core.oam-dev.v1beta1.initializers
sideEffects: None
admissionReviewVersions:
- v1beta1
- v1
rules:
- apiGroups:
- core.oam.dev
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
- DELETE
resources:
- initializers
{{- end -}}

View File

@ -269,13 +269,9 @@ spec:
- "--optimize-enable-resource-tracker-delete-only-trigger=false"
{{ end }}
- "--health-addr=:{{ .Values.healthCheck.port }}"
{{ if ne .Values.disableCaps "" }}
- "--disable-caps={{ .Values.disableCaps }}"
{{ end }}
- "--system-definition-namespace={{ include "systemDefinitionNamespace" . }}"
- "--application-revision-limit={{ .Values.applicationRevisionLimit }}"
- "--definition-revision-limit={{ .Values.definitionRevisionLimit }}"
- "--oam-spec-ver={{ .Values.OAMSpecVer }}"
{{ if .Values.multicluster.enabled }}
- "--enable-cluster-gateway"
{{ if .Values.multicluster.clusterGateway.direct }}
@ -297,7 +293,6 @@ spec:
- "--max-workflow-step-error-retry-times={{ .Values.workflow.step.errorRetryTimes }}"
- "--feature-gates=EnableSuspendOnFailure={{- .Values.workflow.enableSuspendOnFailure | toString -}}"
- "--feature-gates=AuthenticateApplication={{- .Values.authentication.enabled | toString -}}"
- "--feature-gates=LegacyComponentRevision={{- .Values.featureGates.enableLegacyComponentRevision | toString -}}"
- "--feature-gates=GzipResourceTracker={{- .Values.featureGates.gzipResourceTracker | toString -}}"
- "--feature-gates=ZstdResourceTracker={{- .Values.featureGates.zstdResourceTracker | toString -}}"
- "--feature-gates=ApplyOnce={{- .Values.featureGates.applyOnce | toString -}}"

View File

@ -20,15 +20,6 @@ concurrentReconciles: 4
controllerArgs:
reSyncPeriod: 5m
## @param OAMSpecVer OAMSpecVer is the OAM spec version the controller wants to set up
OAMSpecVer: "v0.3"
## @param disableCaps Disable capability
disableCaps: "all"
## @param dependCheckWait dependCheckWait is the time to wait for ApplicationConfiguration's dependent-resource ready
dependCheckWait: 30s
## @section KubeVela workflow parameters
@ -115,7 +106,6 @@ optimize:
disableResourceApplyDoubleCheck: false
enableResourceTrackerDeleteOnlyTrigger: true
##@param featureGates.enableLegacyComponentRevision if disabled, only component with rollout trait will create component revisions
##@param featureGates.gzipResourceTracker compress ResourceTracker using gzip (good) before being stored. This reduces network throughput when dealing with huge ResourceTrackers.
##@param featureGates.zstdResourceTracker compress ResourceTracker using zstd (fast and good) before being stored. This reduces network throughput when dealing with huge ResourceTrackers. Note that zstd will be prioritized if you enable other compression options.
##@param featureGates.applyOnce if enabled, the apply-once feature will be applied to all applications, no state-keep and no resource data storage in ResourceTracker
@ -131,7 +121,6 @@ optimize:
##@param featureGates.disableWorkflowContextConfigMapCache disable the workflow context's configmap informer cache
##@param
featureGates:
enableLegacyComponentRevision: false
gzipResourceTracker: false
zstdResourceTracker: true
applyOnce: false

View File

@ -49,7 +49,6 @@ type CoreOptions struct {
LogDebug bool
ControllerArgs *oamcontroller.Args
HealthAddr string
DisableCaps string
StorageDriver string
InformerSyncPeriod time.Duration
QPS float64
@ -80,17 +79,13 @@ func NewCoreOptions() *CoreOptions {
RevisionLimit: 50,
AppRevisionLimit: 10,
DefRevisionLimit: 20,
CustomRevisionHookURL: "",
AutoGenWorkloadDefinition: true,
ConcurrentReconciles: 4,
DependCheckWait: 30 * time.Second,
OAMSpecVer: "v0.3",
EnableCompatibility: false,
IgnoreAppWithoutControllerRequirement: false,
IgnoreDefinitionWithoutControllerRequirement: false,
},
HealthAddr: ":9440",
DisableCaps: "all",
StorageDriver: "Local",
InformerSyncPeriod: 10 * time.Hour,
QPS: 50,
@ -124,7 +119,6 @@ func (s *CoreOptions) Flags() cliflag.NamedFlagSets {
gfs.Uint64Var(&s.LogFileMaxSize, "log-file-max-size", s.LogFileMaxSize, "Defines the maximum size a log file can grow to, Unit is megabytes.")
gfs.BoolVar(&s.LogDebug, "log-debug", s.LogDebug, "Enable debug logs for development purpose")
gfs.StringVar(&s.HealthAddr, "health-addr", s.HealthAddr, "The address the health endpoint binds to.")
gfs.StringVar(&s.DisableCaps, "disable-caps", s.DisableCaps, "To be disabled builtin capability list.")
gfs.DurationVar(&s.InformerSyncPeriod, "informer-sync-period", s.InformerSyncPeriod,
"The re-sync period for informer in controller-runtime. This is a system-level configuration.")
gfs.Float64Var(&s.QPS, "kube-api-qps", s.QPS, "the qps for reconcile clients. Low qps may lead to low throughput. High qps may give stress to api-server. Raise this value if concurrent-reconciles is set to be high.")

View File

@ -39,7 +39,6 @@ func TestCoreOptions_Flags(t *testing.T) {
args := []string{
"--application-re-sync-period=5s",
"--cluster-metrics-interval=5s",
"--disable-caps=true",
"--enable-cluster-gateway=true",
"--enable-cluster-metrics=true",
"--enable-leader-election=true",
@ -83,7 +82,6 @@ func TestCoreOptions_Flags(t *testing.T) {
LogDebug: true,
ControllerArgs: &oamcontroller.Args{},
HealthAddr: "/healthz",
DisableCaps: "true",
StorageDriver: "",
InformerSyncPeriod: 3 * time.Second,
QPS: 200,

View File

@ -85,7 +85,6 @@ func NewCoreCommand() *cobra.Command {
meta.Name = types.VelaCoreName
klog.InfoS("KubeVela information", "version", version.VelaVersion, "revision", version.GitRevision)
klog.InfoS("Disable capabilities", "name", s.DisableCaps)
klog.InfoS("Vela-Core init", "definition namespace", oam.SystemDefinitionNamespace)
return cmd
@ -93,7 +92,6 @@ func NewCoreCommand() *cobra.Command {
func run(ctx context.Context, s *options.CoreOptions) error {
klog.InfoS("KubeVela information", "version", version.VelaVersion, "revision", version.GitRevision)
klog.InfoS("Disable capabilities", "name", s.DisableCaps)
klog.InfoS("Vela-Core init", "definition namespace", oam.SystemDefinitionNamespace)
restConfig := ctrl.GetConfigOrDie()

View File

@ -14,12 +14,10 @@
| revision-limit | int | 50 | revision-limit is the maximum number of revisions that will be maintained. The default value is 50. |
| application-revision-limit | int | 10 | application-revision-limit is the maximum number of useless application revisions that will be maintained; if the useless revisions exceed this number, older ones will be GCed first. The default value is 10. |
| definition-revision-limit | int | 20 | definition-revision-limit is the maximum number of useless component/trait definition revisions that will be maintained; if the useless revisions exceed this number, older ones will be GCed first. The default value is 20. |
| custom-revision-hook-url | string | "" | custom-revision-hook-url is a webhook URL which KubeVela core will call with applicationConfiguration and component info to get back a customized component revision |
| app-config-installed | bool | true | app-config-installed indicates if applicationConfiguration CRD is installed |
| autogen-workload-definition | bool | true | Automatically generate the workloadDefinition that a componentDefinition refers to |
| health-addr | string | :9440 | The address the health endpoint binds to. |
| apply-once-only | string | false | For the purpose of some production environment that workload or trait should not be affected if no spec change, available options: on, off, force. |
| disable-caps | string | "" | To be disabled builtin capability list. |
| storage-driver | string | Local | Application file save to the storage driver |
| application-re-sync-period | time | 5m | Re-sync period for application to re-sync, also known as the state-keep interval. |
| reconcile-timeout | time | 3m | The timeout for controller reconcile. |

View File

@ -1,58 +0,0 @@
rollout: {
type: "trait"
annotations: {}
description: "Rollout the component."
attributes: {
manageWorkload: true
status: {
customStatus: #"""
message: context.outputs.rollout.status.rollingState
"""#
healthPolicy: #"""
isHealth: context.outputs.rollout.status.batchRollingState == "batchReady"
"""#
}
}
}
template: {
outputs: rollout: {
apiVersion: "standard.oam.dev/v1alpha1"
kind: "Rollout"
metadata: {
name: context.name
namespace: context.namespace
}
spec: {
if parameter.targetRevision != _|_ {
targetRevisionName: parameter.targetRevision
}
if parameter.targetRevision == _|_ {
targetRevisionName: context.revision
}
componentName: context.name
rolloutPlan: {
rolloutStrategy: "IncreaseFirst"
if parameter.rolloutBatches != _|_ {
rolloutBatches: parameter.rolloutBatches
}
targetSize: parameter.targetSize
if parameter["batchPartition"] != _|_ {
batchPartition: parameter.batchPartition
}
}
}
}
parameter: {
// +usage=Specify the target revision. It should be set if you want to roll back, such as: componentname-v1
targetRevision?: string
// +usage=Specify the count of replicas.
targetSize: int
// +usage=Specify the rollout batches. The total number of replicas of all batches needs to be equal to the targetSize.
rolloutBatches?: [...rolloutBatch]
// +usage=Specify the batch partition in current deploying. It is used to control batch processes.
batchPartition?: int
}
rolloutBatch: replicas: int
}

View File

@ -1,21 +0,0 @@
name: rollout
version: 1.2.3
description: Provides basic batch publishing capability.
url: https://kubevela.io/docs/end-user/traits/rollout
tags:
- extended_workload
- rollout
deployTo:
runtimeCluster: true
dependencies:
# install controller by helm.
- name: fluxcd
# set invisible means this won't be listed and will be enabled when depended on
# for example, terraform-alibaba depends on terraform which is invisible,
# when terraform-alibaba is enabled, terraform will be enabled automatically
# default: false
invisible: false

View File

@ -1,7 +0,0 @@
# Supported workload type
Rollout Trait supports following component types: webservice、worker and cloneset.
When using webservice/worker as Workload type with Rollout Trait, Workload's name will be controllerRevision's name. And when Workload's type is cloneset, because of clonset support in-place update Workload's name will always be component's name.

View File

@ -1,10 +0,0 @@
- jsonKey: batchPartition
sort: 1
- jsonKey: targetSize
sort: 3
- jsonKey: rolloutBatches
sort: 5
- jsonKey: targetRevision
sort: 7
validate:
pattern: ^[a-z0-9]+\-v\d{1,5}$
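The removed UI schema validated targetRevision against ^[a-z0-9]+\-v\d{1,5}$, i.e. a lower-case component name followed by a -v<number> suffix such as componentname-v1. A quick self-contained check of that pattern:

package main

import (
	"fmt"
	"regexp"
)

// revisionPattern mirrors validate.pattern from the removed UI schema.
var revisionPattern = regexp.MustCompile(`^[a-z0-9]+\-v\d{1,5}$`)

func main() {
	for _, name := range []string{"frontend-v1", "frontend-v12345", "Frontend-v1", "frontend"} {
		fmt.Printf("%-16s matches: %v\n", name, revisionPattern.MatchString(name))
	}
}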

View File

@ -39,8 +39,8 @@ clearRepo() {
git clone --single-branch --depth 1 https://github.com/kubevela/kubevela-core-api.git kubevela-core-api
fi
echo "clear kubevela-core-api api/"
rm -r kubevela-core-api/apis/*
echo "clear kubevela-core-api apis/"
rm -r kubevela-core-api/apis
echo "clear kubevela-core-api pkg/oam"
rm -r kubevela-core-api/pkg/oam/*
@ -58,8 +58,8 @@ clearRepo() {
}
updateRepo() {
echo "update kubevela-core-api api/"
cp -R kubevela/apis/* kubevela-core-api/apis/
echo "update kubevela-core-api apis/"
cp -R kubevela/apis kubevela-core-api/apis
echo "update kubevela-core-api pkg/oam"
cp -R kubevela/pkg/oam/* kubevela-core-api/pkg/oam/

View File

@ -1,12 +0,0 @@
#!/bin/sh
TEMP_DIR="./runtime/rollout/e2e/tmp/"
mkdir -p $TEMP_DIR
cp -r go.mod $TEMP_DIR
cp -r go.sum $TEMP_DIR
cp -r entrypoint.sh $TEMP_DIR
cp -r runtime/rollout/cmd/main.go $TEMP_DIR
cp -r ./apis $TEMP_DIR
cp -r ./pkg $TEMP_DIR
cp -r ./version $TEMP_DIR

View File

@ -19,8 +19,3 @@ docker-build-core:
.PHONY: docker-build-cli
docker-build-cli:
docker build --build-arg=VERSION=$(VELA_VERSION) --build-arg=GITVERSION=$(GIT_COMMIT) -t $(VELA_CLI_IMAGE) -f Dockerfile.cli .
# Build the runtime docker image
.PHONY: docker-build-runtime-rollout
docker-build-runtime-rollout:
docker build --build-arg=VERSION=$(VELA_VERSION) --build-arg=GITVERSION=$(GIT_COMMIT) -t $(VELA_RUNTIME_ROLLOUT_IMAGE) -f runtime/rollout/Dockerfile .

View File

@ -46,7 +46,5 @@ VELA_CORE_IMAGE ?= vela-core:latest
VELA_CLI_IMAGE ?= oamdev/vela-cli:latest
VELA_CORE_TEST_IMAGE ?= vela-core-test:$(GIT_COMMIT)
VELA_APISERVER_IMAGE ?= apiserver:latest
VELA_RUNTIME_ROLLOUT_IMAGE ?= vela-runtime-rollout:latest
VELA_RUNTIME_ROLLOUT_TEST_IMAGE ?= vela-runtime-rollout-test:$(GIT_COMMIT)
RUNTIME_CLUSTER_CONFIG ?= /tmp/worker.client.kubeconfig
RUNTIME_CLUSTER_NAME ?= worker

View File

@ -9,7 +9,6 @@ e2e-setup-core-post-hook:
kill -9 $(lsof -it:9098) || true
go run ./e2e/addon/mock &
bin/vela addon enable ./e2e/addon/mock/testdata/fluxcd
bin/vela addon enable ./e2e/addon/mock/testdata/rollout
bin/vela addon enable ./e2e/addon/mock/testdata/terraform
bin/vela addon enable ./e2e/addon/mock/testdata/terraform-alibaba ALICLOUD_ACCESS_KEY=xxx ALICLOUD_SECRET_KEY=yyy ALICLOUD_REGION=cn-beijing
@ -27,7 +26,6 @@ e2e-setup-core-wo-auth:
--set image.repository=vela-core-test \
--set applicationRevisionLimit=5 \
--set optimize.disableComponentRevision=false \
--set dependCheckWait=10s \
--set image.tag=$(GIT_COMMIT) \
--wait kubevela ./charts/vela-core
@ -40,7 +38,6 @@ e2e-setup-core-w-auth:
--set image.repository=vela-core-test \
--set applicationRevisionLimit=5 \
--set optimize.disableComponentRevision=false \
--set dependCheckWait=10s \
--set image.tag=$(GIT_COMMIT) \
--wait kubevela \
./charts/vela-core \
@ -67,32 +64,6 @@ e2e-setup-core: e2e-setup-core-pre-hook e2e-setup-core-wo-auth e2e-setup-core-po
.PHONY: e2e-setup-core-auth
e2e-setup-core-auth: e2e-setup-core-pre-hook e2e-setup-core-w-auth e2e-setup-core-post-hook
.PHONY: setup-runtime-e2e-cluster
setup-runtime-e2e-cluster:
helm upgrade --install \
--namespace vela-system \
--wait oam-rollout \
--set image.repository=vela-runtime-rollout-test \
--set image.tag=$(GIT_COMMIT) \
--set applicationRevisionLimit=6 \
--set optimize.disableComponentRevision=false \
./runtime/rollout/charts
helm upgrade --install \
--create-namespace \
--namespace vela-system \
--kubeconfig=$(RUNTIME_CLUSTER_CONFIG) \
--set image.pullPolicy=IfNotPresent \
--set image.repository=vela-runtime-rollout-test \
--set image.tag=$(GIT_COMMIT) \
--set applicationRevisionLimit=6 \
--wait vela-rollout \
--set optimize.disableComponentRevision=false \
./runtime/rollout/charts || \
echo "no worker cluster" \
.PHONY: e2e-api-test
e2e-api-test:
# Run e2e test
@ -103,7 +74,7 @@ e2e-api-test:
.PHONY: e2e-test
e2e-test:
# Run e2e test
ginkgo -v --skip="rollout related e2e-test." ./test/e2e-test
ginkgo -v ./test/e2e-test
@$(OK) tests pass
.PHONY: e2e-addon-test
@ -112,11 +83,6 @@ e2e-addon-test:
ginkgo -v ./test/e2e-addon-test
@$(OK) tests pass
.PHONY: e2e-rollout-test
e2e-rollout-test:
ginkgo -v --focus="rollout related e2e-test." ./test/e2e-test
@$(OK) tests pass
.PHONY: e2e-multicluster-test
e2e-multicluster-test:
go test -v -coverpkg=./... -coverprofile=/tmp/e2e_multicluster_test.out ./test/e2e-multicluster-test

View File

@ -1,19 +0,0 @@
"my-trait": {
type: "trait"
annotations: {}
description: "Rollout the component."
attributes: {
manageWorkload: true
status: {
customStatus: #"""
message: context.outputs.rollout.status.rollingState
"""#
healthPolicy: #"""
isHealth: context.outputs.rollout.status.batchRollingState == "batchReady"
"""#
}
}
}
template: {
outputs: rollout: {}
}

View File

@ -18,11 +18,9 @@ package addon
import (
"fmt"
"net/http/httptest"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
. "github.com/onsi/ginkgo/v2"
@ -30,7 +28,6 @@ import (
"github.com/stretchr/testify/assert"
"helm.sh/helm/v3/pkg/chartutil"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/yaml"
@ -119,29 +116,6 @@ var _ = Describe("Test definition check", func() {
Expect(err).Should(BeNil())
Expect(len(usedApps)).Should(BeEquivalentTo(4))
})
It("check fetch lagacy addon definitions", func() {
res := make(map[string]bool)
server := httptest.NewServer(ossHandler)
defer server.Close()
url := server.URL
cmYaml := strings.ReplaceAll(registryCmYaml, "TEST_SERVER_URL", url)
cm := v1.ConfigMap{}
Expect(yaml.Unmarshal([]byte(cmYaml), &cm)).Should(BeNil())
err := k8sClient.Create(ctx, &cm)
if apierrors.IsAlreadyExists(err) {
Expect(k8sClient.Update(ctx, &cm)).To(Succeed())
} else {
Expect(err).To(Succeed())
}
disableTestAddonApp := v1beta1.Application{}
Expect(yaml.Unmarshal([]byte(addonDisableTestAppYaml), &disableTestAddonApp)).Should(BeNil())
Expect(findLegacyAddonDefs(ctx, k8sClient, "test-disable-addon", disableTestAddonApp.GetLabels()[oam.LabelAddonRegistry], cfg, res)).Should(BeNil())
Expect(len(res)).Should(BeEquivalentTo(2))
})
})
func TestMerge2Map(t *testing.T) {

View File

@ -83,12 +83,9 @@ const (
type Workload struct {
Name string
Type string
ExternalRevision string
CapabilityCategory types.CapabilityCategory
Params map[string]interface{}
Traits []*Trait
Scopes []Scope
ScopeDefinition []*v1beta1.ScopeDefinition
FullTemplate *Template
Ctx process.Context
Patch *value.Value
@ -132,13 +129,6 @@ func (wl *Workload) EvalHealth(templateContext map[string]interface{}) (bool, er
return wl.engine.HealthCheck(templateContext, wl.FullTemplate.Health, wl.Params)
}
// Scope defines the scope of workload
type Scope struct {
Name string
GVK metav1.GroupVersionKind
ResourceVersion string
}
// Trait is ComponentTrait
type Trait struct {
// The Name is name of TraitDefinition, actually it's a type of the trait instance
@ -197,7 +187,6 @@ type Appfile struct {
RelatedTraitDefinitions map[string]*v1beta1.TraitDefinition
RelatedComponentDefinitions map[string]*v1beta1.ComponentDefinition
RelatedWorkflowStepDefinitions map[string]*v1beta1.WorkflowStepDefinition
RelatedScopeDefinitions map[string]*v1beta1.ScopeDefinition
Policies []v1beta1.AppPolicy
PolicyWorkloads []*Workload
@ -571,20 +560,6 @@ func baseGenerateComponent(pCtx process.Context, wl *Workload, appName, ns strin
}
compManifest.Name = wl.Name
compManifest.Namespace = ns
// we record the external revision name in ExternalRevision field
compManifest.ExternalRevision = wl.ExternalRevision
compManifest.Scopes = make([]*corev1.ObjectReference, len(wl.Scopes))
for i, s := range wl.Scopes {
compManifest.Scopes[i] = &corev1.ObjectReference{
APIVersion: metav1.GroupVersion{
Group: s.GVK.Group,
Version: s.GVK.Version,
}.String(),
Kind: s.GVK.Kind,
Name: s.Name,
}
}
return compManifest, nil
}
@ -875,7 +850,6 @@ func generateComponentFromHelmModule(wl *Workload, ctxData velaprocess.ContextDa
compManifest := &types.ComponentManifest{
Name: wl.Name,
Namespace: ctxData.Namespace,
ExternalRevision: wl.ExternalRevision,
StandardWorkload: &unstructured.Unstructured{},
}
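The block deleted from baseGenerateComponent above was the only place parsed scopes surfaced in the manifest: each scope's GVK and instance name were flattened into a corev1.ObjectReference. Condensed into a standalone helper for illustration (the helper name is hypothetical):

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// scopeRef mirrors the removed loop: a scope's GVK plus instance name becomes an ObjectReference.
func scopeRef(gvk metav1.GroupVersionKind, name string) *corev1.ObjectReference {
	return &corev1.ObjectReference{
		APIVersion: metav1.GroupVersion{Group: gvk.Group, Version: gvk.Version}.String(),
		Kind:       gvk.Kind,
		Name:       name,
	}
}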

View File

@ -161,12 +161,6 @@ func (p *Parser) GenerateAppFileFromApp(ctx context.Context, app *v1beta1.Applic
appfile.RelatedTraitDefinitions[t.FullTemplate.TraitDefinition.Name] = td
}
}
for _, s := range w.ScopeDefinition {
if s == nil {
continue
}
appfile.RelatedScopeDefinitions[s.Name] = s.DeepCopy()
}
}
return appfile, nil
@ -181,7 +175,6 @@ func (p *Parser) newAppFile(appName, ns string, app *v1beta1.Application) *Appfi
AppAnnotations: make(map[string]string),
RelatedTraitDefinitions: make(map[string]*v1beta1.TraitDefinition),
RelatedComponentDefinitions: make(map[string]*v1beta1.ComponentDefinition),
RelatedScopeDefinitions: make(map[string]*v1beta1.ScopeDefinition),
RelatedWorkflowStepDefinitions: make(map[string]*v1beta1.WorkflowStepDefinition),
ExternalPolicies: make(map[string]*v1alpha1.Policy),
@ -319,10 +312,6 @@ func (p *Parser) GenerateAppFileFromRevision(appRev *v1beta1.ApplicationRevision
}
}
for k, v := range appRev.Spec.ScopeDefinitions {
appfile.RelatedScopeDefinitions[k] = v.DeepCopy()
}
return appfile, nil
}
@ -559,7 +548,6 @@ func (p *Parser) convertTemplate2Workload(name, typ string, props *runtime.RawEx
}
return &Workload{
Traits: []*Trait{},
ScopeDefinition: []*v1beta1.ScopeDefinition{},
Name: name,
Type: wlType,
CapabilityCategory: templ.CapabilityCategory,
@ -576,16 +564,10 @@ func (p *Parser) parseWorkload(ctx context.Context, comp common.ApplicationCompo
if err != nil {
return nil, err
}
workload.ExternalRevision = comp.ExternalRevision
if err = p.parseTraits(ctx, workload, comp); err != nil {
return nil, err
}
if err = p.parseScopes(ctx, workload, comp); err != nil {
return nil, err
}
return workload, nil
}
@ -605,22 +587,6 @@ func (p *Parser) parseTraits(ctx context.Context, workload *Workload, comp commo
return nil
}
func (p *Parser) parseScopes(ctx context.Context, workload *Workload, comp common.ApplicationComponent) error {
for scopeType, instanceName := range comp.Scopes {
sd, gvk, err := GetScopeDefAndGVK(ctx, p.client, scopeType)
if err != nil {
return err
}
workload.Scopes = append(workload.Scopes, Scope{
Name: instanceName,
GVK: gvk,
ResourceVersion: sd.Spec.Reference.Name + "/" + sd.Spec.Reference.Version,
})
workload.ScopeDefinition = append(workload.ScopeDefinition, sd)
}
return nil
}
// ParseWorkloadFromRevision resolve an ApplicationComponent and generate a Workload
// containing ALL information required by an Appfile from app revision.
func (p *Parser) ParseWorkloadFromRevision(comp common.ApplicationComponent, appRev *v1beta1.ApplicationRevision) (*Workload, error) {
@ -628,16 +594,11 @@ func (p *Parser) ParseWorkloadFromRevision(comp common.ApplicationComponent, app
if err != nil {
return nil, err
}
workload.ExternalRevision = comp.ExternalRevision
if err = p.parseTraitsFromRevision(comp, appRev, workload); err != nil {
return nil, err
}
if err = p.parseScopesFromRevision(comp, appRev, workload); err != nil {
return nil, err
}
return workload, nil
}
@ -657,22 +618,6 @@ func (p *Parser) parseTraitsFromRevision(comp common.ApplicationComponent, appRe
return nil
}
func (p *Parser) parseScopesFromRevision(comp common.ApplicationComponent, appRev *v1beta1.ApplicationRevision, workload *Workload) error {
for scopeType, instanceName := range comp.Scopes {
sd, gvk, err := GetScopeDefAndGVKFromRevision(scopeType, appRev)
if err != nil {
return err
}
workload.Scopes = append(workload.Scopes, Scope{
Name: instanceName,
GVK: gvk,
ResourceVersion: sd.Spec.Reference.Name + "/" + sd.Spec.Reference.Version,
})
workload.ScopeDefinition = append(workload.ScopeDefinition, sd)
}
return nil
}
// ParseWorkloadFromRevisionAndClient resolve an ApplicationComponent and generate a Workload
// containing ALL information required by an Appfile from app revision, and will fall back to
// load external definitions if not found
@ -684,7 +629,6 @@ func (p *Parser) ParseWorkloadFromRevisionAndClient(ctx context.Context, comp co
if err != nil {
return nil, err
}
workload.ExternalRevision = comp.ExternalRevision
for _, traitValue := range comp.Traits {
properties, err := util.RawExtension2Map(traitValue.Properties)
@ -702,21 +646,6 @@ func (p *Parser) ParseWorkloadFromRevisionAndClient(ctx context.Context, comp co
workload.Traits = append(workload.Traits, trait)
}
for scopeType, instanceName := range comp.Scopes {
sd, gvk, err := GetScopeDefAndGVKFromRevision(scopeType, appRev)
if IsNotFoundInAppRevision(err) {
sd, gvk, err = GetScopeDefAndGVK(ctx, p.client, scopeType)
}
if err != nil {
return nil, err
}
workload.Scopes = append(workload.Scopes, Scope{
Name: instanceName,
GVK: gvk,
ResourceVersion: sd.Spec.Reference.Name + "/" + sd.Spec.Reference.Version,
})
workload.ScopeDefinition = append(workload.ScopeDefinition, sd)
}
return workload, nil
}
@ -767,32 +696,3 @@ func (p *Parser) ValidateComponentNames(app *v1beta1.Application) (int, error) {
}
return 0, nil
}
// GetScopeDefAndGVK gets the ScopeDefinition and GroupVersionKind of the given scope
func GetScopeDefAndGVK(ctx context.Context, cli client.Client, name string) (*v1beta1.ScopeDefinition, metav1.GroupVersionKind, error) {
var gvk metav1.GroupVersionKind
sd := new(v1beta1.ScopeDefinition)
err := util.GetDefinition(ctx, cli, sd, name)
if err != nil {
return nil, gvk, err
}
gvk, err = util.GetGVKFromDefinition(cli.RESTMapper(), sd.Spec.Reference)
if err != nil {
return nil, gvk, err
}
return sd, gvk, nil
}
// GetScopeDefAndGVKFromRevision gets the ScopeDefinition and GroupVersionKind of the given scope from the application revision
func GetScopeDefAndGVKFromRevision(name string, appRev *v1beta1.ApplicationRevision) (*v1beta1.ScopeDefinition, metav1.GroupVersionKind, error) {
var gvk metav1.GroupVersionKind
sd, ok := appRev.Spec.ScopeDefinitions[name]
if !ok {
return nil, gvk, fmt.Errorf("scope %s not found in application revision", name)
}
gvk, ok = appRev.Spec.ScopeGVK[sd.Spec.Reference.Name+"/"+sd.Spec.Reference.Version]
if !ok {
return nil, gvk, fmt.Errorf("scope definition found but GVK %s not found in application revision", name)
}
return sd.DeepCopy(), gvk, nil
}
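Note (illustrative, not part of the commit): with parseScopes and the GetScopeDef helpers removed, the parser only resolves component and trait definitions. The sketch below strings together identifiers that appear in the tests touched by this change (NewApplicationParser, GenerateAppFile, GenerateComponentManifests); the wrapper function, its parameters, and the assumption that ComponentManifest still exposes Name, StandardWorkload and Traits are the editor's.
// Illustrative caller of the trimmed parser. Assumes the usual KubeVela
// imports: pkg/appfile, apis/core.oam.dev/v1beta1, the kubevela workflow
// packages helper, controller-runtime client, plus context and fmt.
func printComponents(ctx context.Context, cli client.Client, pd *packages.PackageDiscover, app *v1beta1.Application) error {
	parser := appfile.NewApplicationParser(cli, pd)
	af, err := parser.GenerateAppFile(ctx, app)
	if err != nil {
		return err
	}
	comps, err := af.GenerateComponentManifests()
	if err != nil {
		return err
	}
	for _, cm := range comps {
		// Each manifest now carries only the workload and its traits;
		// there is no scope reference left to report.
		fmt.Printf("%s: workload=%s, traits=%d\n", cm.Name, cm.StandardWorkload.GetKind(), len(cm.Traits))
	}
	return nil
}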

View File

@ -24,7 +24,6 @@ import (
"testing"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/kubevela/pkg/util/slices"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
@ -605,85 +604,6 @@ func TestParser_parseTraits(t *testing.T) {
}
}
func TestParser_parseScopes(t *testing.T) {
type args struct {
workload *Workload
comp common.ApplicationComponent
}
tests := []struct {
name string
args args
mockTemplateLoaderFn TemplateLoaderFn
mockGetFunc test.MockGetFn
wantErr assert.ErrorAssertionFunc
validateFunc func(w *Workload) bool
}{
{
name: "test empty scope",
args: args{
comp: common.ApplicationComponent{},
workload: &Workload{},
},
wantErr: assert.NoError,
validateFunc: func(w *Workload) bool {
return w != nil && len(w.Scopes) == 0
},
},
{
name: "test get gvk error",
args: args{
comp: common.ApplicationComponent{
Scopes: map[string]string{
"cluster1": "namespace1",
"cluster2": "namespace2",
},
},
workload: &Workload{},
},
mockGetFunc: func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
return fmt.Errorf("not exist")
},
wantErr: assert.Error,
},
{
name: "test parse scopes success",
args: args{
comp: common.ApplicationComponent{
Scopes: map[string]string{
"cluster1": "namespace1",
"cluster2": "namespace2",
},
},
workload: &Workload{},
},
mockGetFunc: func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
return nil
},
wantErr: assert.NoError,
validateFunc: func(w *Workload) bool {
if w == nil {
return false
}
scopes := slices.Map(w.Scopes, func(scope Scope) string { return scope.Name })
return len(scopes) == 2 && slices.Contains(scopes, "namespace1") && slices.Contains(scopes, "namespace2")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := NewApplicationParser(&test.MockClient{MockGet: tt.mockGetFunc}, pd)
p.tmplLoader = tt.mockTemplateLoaderFn
err := p.parseScopes(context.Background(), tt.args.workload, tt.args.comp)
if !tt.wantErr(t, err, fmt.Sprintf("parseScopes(%v, %v)", tt.args.workload, tt.args.comp)) {
return
}
if tt.validateFunc != nil {
assert.True(t, tt.validateFunc(tt.args.workload))
}
})
}
}
func TestParser_parseTraitsFromRevision(t *testing.T) {
type args struct {
comp common.ApplicationComponent
@ -783,90 +703,3 @@ func TestParser_parseTraitsFromRevision(t *testing.T) {
})
}
}
func TestParser_parseScopesFromRevision(t *testing.T) {
type args struct {
comp common.ApplicationComponent
appRev *v1beta1.ApplicationRevision
workload *Workload
}
tests := []struct {
name string
args args
wantErr assert.ErrorAssertionFunc
validateFunc func(w *Workload) bool
}{
{
name: "test empty scopes",
args: args{
comp: common.ApplicationComponent{},
},
wantErr: assert.NoError,
},
{
name: "test get scope definition from revision failed",
args: args{
comp: common.ApplicationComponent{
Scopes: map[string]string{
"cluster": "namespace",
},
},
appRev: &v1beta1.ApplicationRevision{
Spec: v1beta1.ApplicationRevisionSpec{
ApplicationRevisionCompressibleFields: v1beta1.ApplicationRevisionCompressibleFields{
ScopeDefinitions: map[string]v1beta1.ScopeDefinition{},
},
},
},
},
wantErr: assert.Error,
},
{
name: "test parse scopes from revision success",
args: args{
comp: common.ApplicationComponent{
Scopes: map[string]string{
"cluster": "namespace",
},
},
appRev: &v1beta1.ApplicationRevision{
Spec: v1beta1.ApplicationRevisionSpec{
ApplicationRevisionCompressibleFields: v1beta1.ApplicationRevisionCompressibleFields{
ScopeDefinitions: map[string]v1beta1.ScopeDefinition{
"cluster": {
Spec: v1beta1.ScopeDefinitionSpec{
AllowComponentOverlap: true,
Reference: common.DefinitionReference{
Name: "cluster",
Version: "v1alpha2",
},
},
},
},
ScopeGVK: map[string]metav1.GroupVersionKind{
"cluster/v1alpha2": {
Group: "core.oam.dev",
Version: "v1alpha2",
},
},
},
},
},
workload: &Workload{},
},
wantErr: assert.NoError,
validateFunc: func(w *Workload) bool {
return w != nil && len(w.Scopes) == 1 && w.Scopes[0].ResourceVersion == "cluster/v1alpha2"
},
},
}
p := NewApplicationParser(nil, pd)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.wantErr(t, p.parseScopesFromRevision(tt.args.comp, tt.args.appRev, tt.args.workload), fmt.Sprintf("parseScopesFromRevision(%v, %v, %v)", tt.args.comp, tt.args.appRev, tt.args.workload))
if tt.validateFunc != nil {
assert.True(t, tt.validateFunc(tt.args.workload))
}
})
}
}

View File

@ -46,7 +46,7 @@ const (
)
// Template is a helper struct for processing capability including
// ComponentDefinition, TraitDefinition, ScopeDefinition.
// ComponentDefinition, TraitDefinition.
// It mainly collects schematic and status data of a capability definition.
type Template struct {
TemplateStr string
@ -61,7 +61,6 @@ type Template struct {
ComponentDefinition *v1beta1.ComponentDefinition
WorkloadDefinition *v1beta1.WorkloadDefinition
TraitDefinition *v1beta1.TraitDefinition
ScopeDefinition *v1beta1.ScopeDefinition
PolicyDefinition *v1beta1.PolicyDefinition
WorkflowStepDefinition *v1beta1.WorkflowStepDefinition
@ -143,8 +142,6 @@ func LoadTemplate(ctx context.Context, cli client.Client, capName string, capTyp
return nil, err
}
return tmpl, nil
case types.TypeScope:
// TODO: add scope template support
}
return nil, fmt.Errorf("kind(%s) of %s not supported", capType, capName)
}
@ -218,16 +215,6 @@ func LoadTemplateFromRevision(capName string, capType types.CapType, apprev *v1b
return nil, err
}
return tmpl, nil
case types.TypeScope:
s, ok := apprev.Spec.ScopeDefinitions[capName]
if !ok {
return nil, errors.Errorf("ScopeDefinition [%s] not found in app revision %s", capName, apprev.Name)
}
tmpl, err := newTemplateOfScopeDefinition(s.DeepCopy())
if err != nil {
return nil, err
}
return tmpl, nil
default:
return nil, fmt.Errorf("kind(%s) of %s not supported", capType, capName)
}
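Aside (editor's sketch, not part of the diff): after this change only component, trait, policy and workflow-step templates can be resolved from an application revision, and asking for a scope falls into the unsupported-kind error above. The example reuses LoadTemplateFromRevision and types.TypeTrait as shown in this file; the helper function and the appRev variable are assumptions.
// Illustrative only: loading a trait template from an app revision once the
// scope branch is gone. Assumes imports of pkg/appfile, apis/types and
// apis/core.oam.dev/v1beta1.
func loadIngressTrait(appRev *v1beta1.ApplicationRevision) (*appfile.Template, error) {
	tmpl, err := appfile.LoadTemplateFromRevision("ingress", types.TypeTrait, appRev)
	if err != nil {
		return nil, err
	}
	// For a trait the loader fills Template.TraitDefinition; the
	// ScopeDefinition field no longer exists on Template.
	return tmpl, nil
}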
@ -252,8 +239,6 @@ func verifyRevisionName(capName string, capType types.CapType, apprev *v1beta1.A
_, ok = apprev.Spec.PolicyDefinitions[splitName]
case types.TypeWorkflowStep:
_, ok = apprev.Spec.WorkflowStepDefinitions[splitName]
case types.TypeScope:
_, ok = apprev.Spec.ScopeDefinitions[splitName]
default:
return capName
}
@ -298,7 +283,6 @@ func DryRunTemplateLoader(defs []oam.Object) TemplateLoaderFn {
}
return tmpl, nil
}
// TODO(roywang) add support for ScopeDefinition
}
}
// not found in provided cap definitions
@ -366,16 +350,6 @@ func newTemplateOfWorkflowStepDefinition(def *v1beta1.WorkflowStepDefinition) (*
return tmpl, nil
}
func newTemplateOfScopeDefinition(def *v1beta1.ScopeDefinition) (*Template, error) {
tmpl := &Template{
ScopeDefinition: def,
}
if err := loadSchematicToTemplate(tmpl, nil, nil, def.Spec.Extension); err != nil {
return nil, errors.WithMessage(err, "cannot load template")
}
return tmpl, nil
}
// loadSchematicToTemplate loads common data that all kind definitions have.
func loadSchematicToTemplate(tmpl *Template, status *common.Status, schematic *common.Schematic, ext *runtime.RawExtension) error {
if status != nil {

View File

@ -17,16 +17,11 @@ limitations under the License.
package core_oam_dev
import (
"time"
"github.com/spf13/pflag"
"github.com/kubevela/workflow/pkg/cue/packages"
)
// ApplyOnceOnlyMode enumerates ApplyOnceOnly modes.
type ApplyOnceOnlyMode string
// Args args used by controller
type Args struct {
@ -42,29 +37,15 @@ type Args struct {
// The default value is 20.
DefRevisionLimit int
// ApplyMode indicates whether workloads and traits should be
// affected if no spec change is made in the ApplicationConfiguration.
ApplyMode ApplyOnceOnlyMode
// CustomRevisionHookURL is a webhook which will let oam-runtime to call with AC+Component info
// The webhook server will return a customized component revision for oam-runtime
CustomRevisionHookURL string
// PackageDiscover used for CRD discovery in CUE packages, a K8s client is contained in it.
PackageDiscover *packages.PackageDiscover
// ConcurrentReconciles is the concurrent reconcile number of the controller
ConcurrentReconciles int
// DependCheckWait is the time to wait for ApplicationConfiguration's dependent-resource ready
DependCheckWait time.Duration
// AutoGenWorkloadDefinition indicates whether automatic generated workloadDefinition which componentDefinition refers to
AutoGenWorkloadDefinition bool
// OAMSpecVer is the oam spec version controller want to setup
OAMSpecVer string
// EnableCompatibility indicates that will change some functions of controller to adapt to multiple platforms, such as asi.
EnableCompatibility bool
@ -83,13 +64,8 @@ func (a *Args) AddFlags(fs *pflag.FlagSet, c *Args) {
"application-revision-limit is the maximum number of application useless revisions that will be maintained, if the useless revisions exceed this number, older ones will be GCed first.The default value is 10.")
fs.IntVar(&a.DefRevisionLimit, "definition-revision-limit", c.DefRevisionLimit,
"definition-revision-limit is the maximum number of component/trait definition useless revisions that will be maintained, if the useless revisions exceed this number, older ones will be GCed first.The default value is 20.")
fs.StringVar(&a.CustomRevisionHookURL, "custom-revision-hook-url", c.CustomRevisionHookURL,
"custom-revision-hook-url is a webhook url which will let KubeVela core to call with applicationConfiguration and component info and return a customized component revision")
fs.BoolVar(&a.AutoGenWorkloadDefinition, "autogen-workload-definition", c.AutoGenWorkloadDefinition, "Automatic generated workloadDefinition which componentDefinition refers to.")
fs.IntVar(&a.ConcurrentReconciles, "concurrent-reconciles", c.ConcurrentReconciles, "concurrent-reconciles is the concurrent reconcile number of the controller. The default value is 4")
fs.DurationVar(&a.DependCheckWait, "depend-check-wait", c.DependCheckWait, "depend-check-wait is the time to wait for ApplicationConfiguration's dependent-resource ready."+
"The default value is 30s, which means if dependent resources were not prepared, the ApplicationConfiguration would be reconciled after 30s.")
fs.StringVar(&a.OAMSpecVer, "oam-spec-ver", c.OAMSpecVer, "oam-spec-ver is the oam spec version controller want to setup, available options: v0.2, v0.3, all")
fs.BoolVar(&a.EnableCompatibility, "enable-asi-compatibility", c.EnableCompatibility, "enable compatibility for asi")
fs.BoolVar(&a.IgnoreAppWithoutControllerRequirement, "ignore-app-without-controller-version", c.IgnoreAppWithoutControllerRequirement, "If true, application controller will not process the app without 'app.oam.dev/controller-version-require' annotation")
fs.BoolVar(&a.IgnoreDefinitionWithoutControllerRequirement, "ignore-definition-without-controller-version", c.IgnoreDefinitionWithoutControllerRequirement, "If true, trait/component/workflowstep definition controller will not process the definition without 'definition.oam.dev/controller-version-require' annotation")
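For context, a hedged sketch of how the slimmed-down arguments still get wired: legacy options such as custom-revision-hook-url and the apply-once-only mode are gone, but the remaining fields register through Args.AddFlags exactly as before. Only fields visible in the hunk above are used; the import path of the controller package and the default values are assumptions.
// Illustrative flag wiring, not part of the commit. Assumes
// oamcontroller refers to the core.oam.dev controller package shown above
// and that pflag is imported as in this file.
func parseControllerFlags(argv []string) (*oamcontroller.Args, error) {
	defaults := &oamcontroller.Args{
		DefRevisionLimit:     20,
		ConcurrentReconciles: 4,
	}
	args := &oamcontroller.Args{}
	fs := pflag.NewFlagSet("vela-core", pflag.ContinueOnError)
	args.AddFlags(fs, defaults)
	if err := fs.Parse(argv); err != nil {
		return nil, err
	}
	return args, nil
}
// e.g. parseControllerFlags([]string{"--concurrent-reconciles=8"})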

View File

@ -585,7 +585,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
Complete(r)
}
// Setup adds a controller that reconciles AppRollout.
// Setup adds a controller that reconciles App.
func Setup(mgr ctrl.Manager, args core.Args) error {
reconciler := Reconciler{
Client: mgr.GetClient(),

View File

@ -755,50 +755,6 @@ var _ = Describe("Test Application Controller", func() {
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
})
PIt("Test context revision can be supported by in workload ", func() {
compDef, err := yaml.YAMLToJSON([]byte(workloadWithContextRevision))
Expect(err).Should(BeNil())
component := &v1beta1.ComponentDefinition{}
Expect(json.Unmarshal([]byte(compDef), component)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
Expect(k8sClient.Create(ctx, component)).Should(BeNil())
ns := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "app-workload-context-revision",
},
}
Expect(k8sClient.Create(ctx, &ns)).Should(BeNil())
app := &v1beta1.Application{
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "core.oam.dev/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app-test-context-revision",
Namespace: ns.Name,
},
Spec: v1beta1.ApplicationSpec{
Components: []common.ApplicationComponent{
{
Name: "myweb1",
Type: "worker-revision",
Properties: &runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
},
},
},
}
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
appKey := types.NamespacedName{Namespace: ns.Name, Name: app.Name}
testutil.ReconcileOnceAfterFinalizer(reconciler, reconcile.Request{NamespacedName: appKey})
checkApp := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationRunning))
deploy := &v1.Deployment{}
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "myweb1", Namespace: ns.Name}, deploy)).Should(BeNil())
By("verify targetRevision will be filled with real compRev by context.Revision")
Expect(len(deploy.Spec.Template.Labels)).Should(BeEquivalentTo(2))
Expect(deploy.Spec.Template.Labels["app.oam.dev/revision"]).Should(BeEquivalentTo("myweb1-v1"))
})
It("application with dag workflow failed after retries", func() {
defer featuregatetesting.SetFeatureGateDuringTest(&testing.T{}, utilfeature.DefaultFeatureGate, wffeatures.EnableSuspendOnFailure, true)()
ns := corev1.Namespace{
@ -4556,132 +4512,6 @@ spec:
}
`
tDDefWithHealthStatusYaml = `apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
name: ingress
namespace: vela-system
spec:
status:
customStatus: |-
message: "type: "+ context.outputs.service.spec.type +",\t clusterIP:"+ context.outputs.service.spec.clusterIP+",\t ports:"+ "\(context.outputs.service.spec.ports[0].port)"+",\t domain"+context.outputs.ingress.spec.rules[0].host
healthPolicy: |
isHealth: len(context.outputs.service.spec.clusterIP) > 0
schematic:
cue:
template: |
parameter: {
domain: string
http: [string]: int
}
// trait template can have multiple outputs in one trait
outputs: service: {
apiVersion: "v1"
kind: "Service"
spec: {
selector:
app: context.name
ports: [
for k, v in parameter.http {
port: v
targetPort: v
},
]
}
}
outputs: ingress: {
apiVersion: "networking.k8s.io/v1"
kind: "Ingress"
metadata:
name: context.name
spec: {
rules: [{
host: parameter.domain
http: {
paths: [
for k, v in parameter.http {
path: k
pathType: "Prefix"
backend: {
service: {
name: context.name
port: {
number: v
}
}
}
},
]
}
}]
}
}
`
workloadWithContextRevision = `
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: worker-revision
namespace: vela-system
annotations:
definition.oam.dev/description: "Long-running scalable backend worker without network endpoint"
spec:
workload:
definition:
apiVersion: apps/v1
kind: Deployment
extension:
healthPolicy: |
isHealth: context.output.status.readyReplicas == context.output.status.replicas
template: |
output: {
apiVersion: "apps/v1"
kind: "Deployment"
metadata: {
annotations: {
if context["config"] != _|_ {
for _, v in context.config {
"\(v.name)" : v.value
}
}
}
}
spec: {
selector: matchLabels: {
"app.oam.dev/component": context.name
}
template: {
metadata: labels: {
"app.oam.dev/component": context.name
"app.oam.dev/revision": context.revision
}
spec: {
containers: [{
name: context.name
image: parameter.image
if parameter["cmd"] != _|_ {
command: parameter.cmd
}
}]
}
}
selector:
matchLabels:
"app.oam.dev/component": context.name
}
}
parameter: {
// +usage=Which image would you like to use for your service
// +short=i
image: string
cmd?: [...string]
}`
k8sObjectsComponentDefinitionYaml = `
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition

View File

@ -23,7 +23,6 @@ import (
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -362,7 +361,6 @@ collectNext:
status.Traits = oldStatus
}
status.Traits = append(status.Traits, traitStatusList...)
status.Scopes = generateScopeReference(wl.Scopes)
h.addServiceStatus(true, status)
return &status, output, outputs, isHealth, nil
}
@ -390,21 +388,6 @@ func setStatus(status *common.ApplicationComponentStatus, observedGeneration, ge
return true
}
func generateScopeReference(scopes []appfile.Scope) []corev1.ObjectReference {
var references []corev1.ObjectReference
for _, scope := range scopes {
references = append(references, corev1.ObjectReference{
APIVersion: metav1.GroupVersion{
Group: scope.GVK.Group,
Version: scope.GVK.Version,
}.String(),
Kind: scope.GVK.Kind,
Name: scope.Name,
})
}
return references
}
// ApplyPolicies will render policies into manifests from appfile and dispatch them
func (h *AppHandler) ApplyPolicies(ctx context.Context, af *appfile.Appfile) error {
if ctx, ok := ctx.(monitorContext.Context); ok {

View File

@ -18,7 +18,6 @@ package assemble
import (
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
@ -52,7 +51,6 @@ type AppManifests struct {
assembledWorkloads map[string]*unstructured.Unstructured
assembledTraits map[string][]*unstructured.Unstructured
// key is workload reference, values are the references of scopes the workload belongs to
referencedScopes map[corev1.ObjectReference][]corev1.ObjectReference
skipWorkloadApplyComp map[string]bool
finalized bool
@ -124,32 +122,16 @@ func (am *AppManifests) AssembledManifests() ([]*unstructured.Unstructured, erro
return r, nil
}
// ReferencedScopes do assemble and return workload reference and referenced scopes
func (am *AppManifests) ReferencedScopes() (map[corev1.ObjectReference][]corev1.ObjectReference, error) {
if !am.finalized {
am.assemble()
}
if am.err != nil {
return nil, am.err
}
r := make(map[corev1.ObjectReference][]corev1.ObjectReference)
for k, refs := range am.referencedScopes {
r[k] = make([]corev1.ObjectReference, len(refs))
copy(r[k], refs)
}
return r, nil
}
// GroupAssembledManifests do assemble and return all resources grouped by components
func (am *AppManifests) GroupAssembledManifests() (
map[string]*unstructured.Unstructured,
map[string][]*unstructured.Unstructured,
map[corev1.ObjectReference][]corev1.ObjectReference, error) {
error) {
if !am.finalized {
am.assemble()
}
if am.err != nil {
return nil, nil, nil, am.err
return nil, nil, am.err
}
workloads := make(map[string]*unstructured.Unstructured)
for k, wl := range am.assembledWorkloads {
@ -162,12 +144,7 @@ func (am *AppManifests) GroupAssembledManifests() (
traits[k][i] = t.DeepCopy()
}
}
scopes := make(map[corev1.ObjectReference][]corev1.ObjectReference)
for k, v := range am.referencedScopes {
scopes[k] = make([]corev1.ObjectReference, len(v))
copy(scopes[k], v)
}
return workloads, traits, scopes, nil
return workloads, traits, nil
}
// checkAutoDetectComponent will check if the standardWorkload is empty,
@ -201,16 +178,7 @@ func (am *AppManifests) assemble() {
}
am.assembledWorkloads[comp.Name] = wl
workloadRef := corev1.ObjectReference{
APIVersion: wl.GetAPIVersion(),
Kind: wl.GetKind(),
Name: wl.GetName(),
}
am.assembledTraits[comp.Name] = traits
am.referencedScopes[workloadRef] = make([]corev1.ObjectReference, len(comp.Scopes))
for i, scope := range comp.Scopes {
am.referencedScopes[workloadRef][i] = *scope
}
}
am.finalizeAssemble(nil)
}
@ -263,7 +231,6 @@ func (am *AppManifests) complete() error {
am.assembledWorkloads = make(map[string]*unstructured.Unstructured)
am.assembledTraits = make(map[string][]*unstructured.Unstructured)
am.referencedScopes = make(map[corev1.ObjectReference][]corev1.ObjectReference)
am.skipWorkloadApplyComp = make(map[string]bool)
return nil
}
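Downstream of this change, GroupAssembledManifests yields only the per-component workloads and traits, as the test update below also shows. A hedged sketch of a caller follows; the import path of the assemble package and the helper function are assumptions, the method signature follows the hunk above.
// Illustrative caller of the narrowed assemble API; the *AppManifests value is
// assumed to come from NewAppManifests(appRev, parser) as in the tests.
func printAssembled(am *assemble.AppManifests) error {
	workloads, traits, err := am.GroupAssembledManifests()
	if err != nil {
		return err
	}
	for comp, wl := range workloads {
		// No scope references are returned any more, only the workload and
		// its traits per component.
		fmt.Printf("%s: %s with %d trait(s)\n", comp, wl.GetKind(), len(traits[comp]))
	}
	return nil
}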

View File

@ -60,7 +60,7 @@ var _ = Describe("Test Assemble Options", func() {
Expect(err).Should(BeNil())
ao := NewAppManifests(appRev, appParser)
workloads, traits, _, err := ao.GroupAssembledManifests()
workloads, traits, err := ao.GroupAssembledManifests()
Expect(err).Should(BeNil())
By("Verify amount of result resources")

View File

@ -19,15 +19,9 @@ package assemble
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
kruisev1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -114,67 +108,3 @@ func discoverHelmModuleWorkload(ctx context.Context, c client.Reader, assembledW
assembledWorkload.SetName(qualifiedWorkloadName)
return nil
}
// PrepareWorkloadForRollout prepares the workload before it is emitted to Kubernetes. The current approach is to mark it
// as disabled so that its spec won't take effect immediately. The rollout controller can take over the resources
// and enable it on its own, since the app controller here won't override their changes
func PrepareWorkloadForRollout(rolloutComp string) WorkloadOption {
return WorkloadOptionFn(func(assembledWorkload *unstructured.Unstructured, _ *v1beta1.ComponentDefinition, _ []*unstructured.Unstructured) error {
compName := assembledWorkload.GetLabels()[oam.LabelAppComponent]
if compName != rolloutComp {
return nil
}
const (
// below are the resources that we know how to disable
cloneSetDisablePath = "spec.updateStrategy.paused"
advancedStatefulSetDisablePath = "spec.updateStrategy.rollingUpdate.paused"
deploymentDisablePath = "spec.paused"
)
pv := fieldpath.Pave(assembledWorkload.UnstructuredContent())
// TODO: we can get the workloadDefinition name from workload.GetLabels()["oam.WorkloadTypeLabel"]
// and use a special field like "disablePath" in the definition to allow configurable behavior
// we hard code the behavior depends on the known assembledWorkload.group/kind for now.
if assembledWorkload.GroupVersionKind().Group == kruisev1alpha1.GroupVersion.Group {
switch assembledWorkload.GetKind() {
case reflect.TypeOf(kruisev1alpha1.CloneSet{}).Name():
err := pv.SetBool(cloneSetDisablePath, true)
if err != nil {
return err
}
klog.InfoS("we render a CloneSet assembledWorkload.paused on the first time",
"kind", assembledWorkload.GetKind(), "instance name", assembledWorkload.GetName())
return nil
case reflect.TypeOf(kruisev1alpha1.StatefulSet{}).Name():
err := pv.SetBool(advancedStatefulSetDisablePath, true)
if err != nil {
return err
}
klog.InfoS("we render an advanced statefulset assembledWorkload.paused on the first time",
"kind", assembledWorkload.GetKind(), "instance name", assembledWorkload.GetName())
return nil
}
}
if assembledWorkload.GroupVersionKind().Group == appsv1.GroupName {
switch assembledWorkload.GetKind() {
case reflect.TypeOf(appsv1.Deployment{}).Name():
if err := pv.SetBool(deploymentDisablePath, true); err != nil {
return err
}
klog.InfoS("we render a deployment assembledWorkload.paused on the first time",
"kind", assembledWorkload.GetKind(), "instance name", assembledWorkload.GetName())
return nil
case reflect.TypeOf(appsv1.StatefulSet{}).Name():
// TODO: Pause StatefulSet here.
return nil
}
}
klog.InfoS("we encountered an unknown resource, we don't know how to prepare it",
"GVK", assembledWorkload.GroupVersionKind().String(), "instance name", assembledWorkload.GetName())
return fmt.Errorf("we do not know how to prepare `%s` as it has an unknown type %s", assembledWorkload.GetName(),
assembledWorkload.GroupVersionKind().String())
})
}

View File

@ -20,7 +20,6 @@ import (
"context"
"fmt"
"os"
"reflect"
"github.com/google/go-cmp/cmp"
. "github.com/onsi/ginkgo/v2"
@ -29,22 +28,16 @@ import (
"sigs.k8s.io/yaml"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/openkruise/kruise-api/apps/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
helmapi "github.com/oam-dev/kubevela/pkg/appfile/helm/flux2apis"
"github.com/oam-dev/kubevela/pkg/oam"
)
var _ = Describe("Test WorkloadOption", func() {
var (
compName = "test-comp"
appRev *v1beta1.ApplicationRevision
appRev *v1beta1.ApplicationRevision
)
BeforeEach(func() {
@ -55,76 +48,6 @@ var _ = Describe("Test WorkloadOption", func() {
Expect(err).Should(BeNil())
})
Context("test PrepareWorkloadForRollout WorkloadOption", func() {
It("test rollout OpenKruise CloneSet", func() {
By("Use openkruise CloneSet as workload")
cs := &unstructured.Unstructured{}
cs.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind(reflect.TypeOf(v1alpha1.CloneSet{}).Name()))
cs.SetLabels(map[string]string{oam.LabelAppComponent: compName})
comp := types.ComponentManifest{
Name: compName,
StandardWorkload: cs,
}
By("Add PrepareWorkloadForRollout WorkloadOption")
ao := NewAppManifests(appRev, appParser).WithWorkloadOption(PrepareWorkloadForRollout(compName))
ao.componentManifests = []*types.ComponentManifest{&comp}
workloads, _, _, err := ao.GroupAssembledManifests()
Expect(err).Should(BeNil())
Expect(len(workloads)).Should(Equal(1))
By("Verify workload name is set as component name")
wl := workloads[compName]
Expect(wl.GetName()).Should(Equal(compName))
By("Verify workload is paused")
assembledCS := &v1alpha1.CloneSet{}
runtime.DefaultUnstructuredConverter.FromUnstructured(wl.Object, assembledCS)
Expect(assembledCS.Spec.UpdateStrategy.Paused).Should(BeTrue())
})
It("test rollout OpenKruise StatefulSet", func() {
By("Use openkruise CloneSet as workload")
sts := &unstructured.Unstructured{}
sts.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind(reflect.TypeOf(v1alpha1.StatefulSet{}).Name()))
sts.SetLabels(map[string]string{oam.LabelAppComponent: compName})
comp := types.ComponentManifest{
Name: compName,
StandardWorkload: sts,
}
By("Add PrepareWorkloadForRollout WorkloadOption")
ao := NewAppManifests(appRev, appParser).WithWorkloadOption(PrepareWorkloadForRollout(compName))
ao.componentManifests = []*types.ComponentManifest{&comp}
workloads, _, _, err := ao.GroupAssembledManifests()
Expect(err).Should(BeNil())
Expect(len(workloads)).Should(Equal(1))
By("Verify workload name is set as component name")
wl := workloads[compName]
Expect(wl.GetName()).Should(Equal(compName))
By("Verify workload is paused")
assembledCS := &v1alpha1.StatefulSet{}
runtime.DefaultUnstructuredConverter.FromUnstructured(wl.Object, assembledCS)
fmt.Println(assembledCS.Spec.UpdateStrategy)
Expect(assembledCS.Spec.UpdateStrategy.RollingUpdate.Paused).Should(BeTrue())
})
It("test rollout Deployment", func() {
By("Add PrepareWorkloadForRollout WorkloadOption")
ao := NewAppManifests(appRev, appParser).WithWorkloadOption(PrepareWorkloadForRollout(compName))
workloads, _, _, err := ao.GroupAssembledManifests()
Expect(err).Should(BeNil())
Expect(len(workloads)).Should(Equal(1))
By("Verify workload name is set as component name")
wl := workloads[compName]
Expect(wl.GetName()).Should(Equal(compName))
By("Verify workload is paused")
assembledDeploy := &appsv1.Deployment{}
runtime.DefaultUnstructuredConverter.FromUnstructured(wl.Object, assembledDeploy)
Expect(assembledDeploy.Spec.Paused).Should(BeTrue())
})
})
Describe("test DiscoveryHelmBasedWorkload", func() {
ns := "test-ns"
releaseName := "test-rls"

View File

@ -39,7 +39,6 @@ import (
"github.com/kubevela/workflow/pkg/cue/packages"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/appfile"
// +kubebuilder:scaffold:imports
)
@ -80,8 +79,6 @@ var _ = BeforeSuite(func() {
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = v1alpha1.SchemeBuilder.AddToScheme(testScheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.SchemeBuilder.AddToScheme(testScheme)
Expect(err).NotTo(HaveOccurred())
err = scheme.AddToScheme(testScheme)

View File

@ -65,8 +65,6 @@ const (
ManifestKeyWorkload = "StandardWorkload"
// ManifestKeyTraits is the key in Component Manifest for containing Trait cr.
ManifestKeyTraits = "Traits"
// ManifestKeyScopes is the key in Component Manifest for containing scope cr reference.
ManifestKeyScopes = "Scopes"
)
var (
@ -149,7 +147,6 @@ func SprintComponentManifest(cm *types.ComponentManifest) string {
trs = append(trs, string(util.MustJSONMarshal(tr)))
}
cl[ManifestKeyTraits] = trs
cl[ManifestKeyScopes] = cm.Scopes
return string(util.MustJSONMarshal(cl))
}
@ -210,10 +207,8 @@ func (h *AppHandler) gatherRevisionSpec(af *appfile.Appfile) (*v1beta1.Applicati
ComponentDefinitions: make(map[string]*v1beta1.ComponentDefinition),
WorkloadDefinitions: make(map[string]v1beta1.WorkloadDefinition),
TraitDefinitions: make(map[string]*v1beta1.TraitDefinition),
ScopeDefinitions: make(map[string]v1beta1.ScopeDefinition),
PolicyDefinitions: make(map[string]v1beta1.PolicyDefinition),
WorkflowStepDefinitions: make(map[string]*v1beta1.WorkflowStepDefinition),
ScopeGVK: make(map[string]metav1.GroupVersionKind),
Policies: make(map[string]v1alpha1.Policy),
},
},
@ -242,15 +237,6 @@ func (h *AppHandler) gatherRevisionSpec(af *appfile.Appfile) (*v1beta1.Applicati
appRev.Spec.TraitDefinitions[t.FullTemplate.TraitDefinition.Name] = td.DeepCopy()
}
}
for _, s := range w.ScopeDefinition {
if s == nil {
continue
}
appRev.Spec.ScopeDefinitions[s.Name] = *s.DeepCopy()
}
for _, s := range w.Scopes {
appRev.Spec.ScopeGVK[s.ResourceVersion] = s.GVK
}
}
for _, p := range af.PolicyWorkloads {
if p == nil || p.FullTemplate == nil {
@ -355,13 +341,6 @@ func ComputeAppRevisionHash(appRevision *v1beta1.ApplicationRevision) (string, e
}
revHash.TraitDefinitionHash[key] = hash
}
for key, sd := range appRevision.Spec.ScopeDefinitions {
hash, err := utils.ComputeSpecHash(&sd.Spec)
if err != nil {
return "", err
}
revHash.ScopeDefinitionHash[key] = hash
}
for key, pd := range appRevision.Spec.PolicyDefinitions {
hash, err := utils.ComputeSpecHash(&pd.Spec)
if err != nil {
@ -460,9 +439,6 @@ func DeepEqualRevision(old, new *v1beta1.ApplicationRevision) bool {
if len(old.Spec.ComponentDefinitions) != len(new.Spec.ComponentDefinitions) {
return false
}
if len(old.Spec.ScopeDefinitions) != len(new.Spec.ScopeDefinitions) {
return false
}
for key, wd := range new.Spec.WorkloadDefinitions {
if !apiequality.Semantic.DeepEqual(old.Spec.WorkloadDefinitions[key].Spec, wd.Spec) {
return false
@ -478,11 +454,6 @@ func DeepEqualRevision(old, new *v1beta1.ApplicationRevision) bool {
return false
}
}
for key, sd := range new.Spec.ScopeDefinitions {
if !apiequality.Semantic.DeepEqual(old.Spec.ScopeDefinitions[key].Spec, sd.Spec) {
return false
}
}
return deepEqualAppInRevision(old, new)
}
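Since ScopeDefinitions no longer feed ComputeAppRevisionHash or DeepEqualRevision, deciding whether a new revision is needed now depends only on the application spec and the remaining definition maps. A sketch of that check, written as if it sat in the same package as the functions above; the helper itself and its parameters are illustrative, while the two functions and oam.LabelAppRevisionHash appear in this commit.
// Illustrative helper, not part of the commit: decide whether newRev can reuse
// oldRev after scope definitions stopped contributing to the comparison.
func revisionUnchanged(oldRev, newRev *v1beta1.ApplicationRevision) (bool, error) {
	newHash, err := ComputeAppRevisionHash(newRev)
	if err != nil {
		return false, err
	}
	// Both the stored hash label and the structural comparison ignore scope
	// definitions after this change.
	return newHash == oldRev.GetLabels()[oam.LabelAppRevisionHash] && DeepEqualRevision(oldRev, newRev), nil
}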

View File

@ -157,8 +157,6 @@ var _ = Describe("Test application controller clean up ", func() {
appName := "app-4"
appKey := types.NamespacedName{Namespace: namespace, Name: appName}
app := getApp(appName, namespace, "normal-worker")
metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationAppRollout, "true")
metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationRollingComponent, "comp1")
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
checkApp := new(v1beta1.Application)
for i := 0; i < appRevisionLimit+1; i++ {

View File

@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"reflect"
"strconv"
"testing"
"time"
@ -49,8 +48,6 @@ var _ = Describe("test generate revision ", func() {
var app v1beta1.Application
cd := v1beta1.ComponentDefinition{}
webCompDef := v1beta1.ComponentDefinition{}
wd := v1beta1.WorkloadDefinition{}
rolloutTd := v1beta1.TraitDefinition{}
var handler *AppHandler
var comps []*oamtypes.ComponentManifest
var namespaceName string
@ -116,7 +113,6 @@ var _ = Describe("test generate revision ", func() {
}
appRevision1.Spec.Application = app
appRevision1.Spec.ComponentDefinitions[cd.Name] = cd.DeepCopy()
appRevision1.Spec.TraitDefinitions[rolloutTd.Name] = rolloutTd.DeepCopy()
appRevision2 = *appRevision1.DeepCopy()
appRevision2.Name = "appRevision2"
@ -154,14 +150,6 @@ var _ = Describe("test generate revision ", func() {
verifyEqual()
})
It("Test app revisions with same spec should produce same hash and equal regardless of other fields", func() {
// add an annotation to workload Definition
wd.SetAnnotations(map[string]string{oam.AnnotationAppRollout: "true"})
appRevision2.Spec.ComponentDefinitions[cd.Name] = cd.DeepCopy()
verifyEqual()
})
It("Test app revisions with different application spec should produce different hash and not equal", func() {
// change application setting
appRevision2.Spec.Application.Spec.Components[0].Properties.Raw =
@ -363,127 +351,6 @@ var _ = Describe("test generate revision ", func() {
Expect(curApp.Status.LatestRevision.RevisionHash).Should(Equal(appHash3))
})
It("Test App with rollout template", func() {
By("Apply the application")
appParser := appfile.NewApplicationParser(reconciler.Client, reconciler.pd)
ctx = util.SetNamespaceInCtx(ctx, app.Namespace)
// mark the app as rollout
app.SetAnnotations(map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)})
generatedAppfile, err := appParser.GenerateAppFile(ctx, &app)
Expect(err).Should(Succeed())
comps, err = generatedAppfile.GenerateComponentManifests()
Expect(err).Should(Succeed())
Expect(handler.PrepareCurrentAppRevision(ctx, generatedAppfile)).Should(Succeed())
Expect(handler.FinalizeAndApplyAppRevision(ctx)).Should(Succeed())
Expect(handler.ProduceArtifacts(context.Background(), comps, nil)).Should(Succeed())
Expect(handler.UpdateAppLatestRevisionStatus(ctx)).Should(Succeed())
curApp := &v1beta1.Application{}
Eventually(
func() error {
return handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curApp)
},
time.Second*10, time.Millisecond*500).Should(BeNil())
Expect(curApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))
By("Verify the created appRevision is exactly what it is")
curAppRevision := &v1beta1.ApplicationRevision{}
Eventually(
func() error {
return handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: curApp.Status.LatestRevision.Name},
curAppRevision)
},
time.Second*5, time.Millisecond*500).Should(BeNil())
appHash1, err := ComputeAppRevisionHash(curAppRevision)
Expect(err).Should(Succeed())
Expect(curAppRevision.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(appHash1).Should(Equal(curApp.Status.LatestRevision.RevisionHash))
ctrlOwner := metav1.GetControllerOf(curAppRevision)
Expect(ctrlOwner).ShouldNot(BeNil())
Expect(ctrlOwner.Kind).Should(Equal(v1beta1.ApplicationKind))
Expect(len(curAppRevision.GetOwnerReferences())).Should(BeEquivalentTo(1))
By("Apply the application again without any spec change but remove the rollout annotation")
annoKey2 := "testKey2"
app.SetAnnotations(map[string]string{annoKey2: "true"})
lastRevision := curApp.Status.LatestRevision.Name
Expect(handler.PrepareCurrentAppRevision(ctx, generatedAppfile)).Should(Succeed())
Expect(handler.FinalizeAndApplyAppRevision(ctx)).Should(Succeed())
Expect(handler.ProduceArtifacts(context.Background(), comps, nil)).Should(Succeed())
Expect(handler.UpdateAppLatestRevisionStatus(ctx)).Should(Succeed())
Eventually(
func() error {
return handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curApp)
},
time.Second*10, time.Millisecond*500).Should(BeNil())
// no new revision should be created
Expect(curApp.Status.LatestRevision.Name).Should(Equal(lastRevision))
Expect(curApp.Status.LatestRevision.RevisionHash).Should(Equal(appHash1))
By("Verify the appRevision is not changed")
// reset appRev
curAppRevision = &v1beta1.ApplicationRevision{}
Eventually(
func() error {
return handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: lastRevision},
curAppRevision)
},
time.Second*5, time.Millisecond*500).Should(BeNil())
Expect(err).Should(Succeed())
Expect(curAppRevision.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(curAppRevision.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty())
By("Change the application and apply again with rollout")
// bump the image tag
app.SetAnnotations(map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)})
app.ResourceVersion = curApp.ResourceVersion
app.Spec.Components[0].Properties = &runtime.RawExtension{
Raw: []byte(`{"image": "oamdev/testapp:v2", "cmd": ["node", "server.js"]}`),
}
// persist the app
Expect(k8sClient.Update(ctx, &app)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
generatedAppfile, err = appParser.GenerateAppFile(ctx, &app)
Expect(err).Should(Succeed())
comps, err = generatedAppfile.GenerateComponentManifests()
Expect(err).Should(Succeed())
handler.app = &app
Expect(handler.PrepareCurrentAppRevision(ctx, generatedAppfile)).Should(Succeed())
Expect(handler.FinalizeAndApplyAppRevision(ctx)).Should(Succeed())
Expect(handler.ProduceArtifacts(context.Background(), comps, nil)).Should(Succeed())
Expect(handler.UpdateAppLatestRevisionStatus(ctx)).Should(Succeed())
Eventually(
func() error {
return handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curApp)
},
time.Second*10, time.Millisecond*500).Should(BeNil())
// new revision should be created
Expect(curApp.Status.LatestRevision.Name).ShouldNot(Equal(lastRevision))
Expect(curApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(2))
Expect(curApp.Status.LatestRevision.RevisionHash).ShouldNot(Equal(appHash1))
By("Verify the appRevision is changed")
// reset appRev
curAppRevision = &v1beta1.ApplicationRevision{}
Eventually(
func() error {
return handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: curApp.Status.LatestRevision.Name},
curAppRevision)
},
time.Second*5, time.Millisecond*500).Should(BeNil())
appHash2, err := ComputeAppRevisionHash(curAppRevision)
Expect(err).Should(Succeed())
Expect(appHash1).ShouldNot(Equal(appHash2))
Expect(curAppRevision.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash2))
Expect(curApp.Status.LatestRevision.RevisionHash).Should(Equal(appHash2))
Expect(curAppRevision.GetAnnotations()[annoKey2]).Should(BeEmpty())
Expect(curAppRevision.GetAnnotations()[oam.AnnotationAppRollout]).ShouldNot(BeEmpty())
})
It("Test apply passes all label and annotation from app to appRevision", func() {
By("Apply the application")
appParser := appfile.NewApplicationParser(reconciler.Client, reconciler.pd)

View File

@ -36,7 +36,6 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/utils/pointer"
@ -49,9 +48,7 @@ import (
"github.com/kubevela/workflow/pkg/cue/packages"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/features"
"github.com/oam-dev/kubevela/pkg/multicluster"
// +kubebuilder:scaffold:imports
)
@ -98,9 +95,6 @@ var _ = BeforeSuite(func() {
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = v1alpha1.SchemeBuilder.AddToScheme(testScheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.SchemeBuilder.AddToScheme(testScheme)
Expect(err).NotTo(HaveOccurred())
@ -147,7 +141,6 @@ var _ = BeforeSuite(func() {
err = mgr.Start(ctx)
Expect(err).NotTo(HaveOccurred())
}()
Expect(utilfeature.DefaultMutableFeatureGate.Set(fmt.Sprintf("%s=true", features.LegacyComponentRevision))).Should(Succeed())
multicluster.InitClusterInfo(cfg)
})

View File

@ -30,30 +30,11 @@ import (
// Setup workload controllers.
func Setup(mgr ctrl.Manager, args controller.Args) error {
switch args.OAMSpecVer {
case "all":
for _, setup := range []func(ctrl.Manager, controller.Args) error{
application.Setup, traitdefinition.Setup, componentdefinition.Setup, policydefinition.Setup, workflowstepdefinition.Setup,
} {
if err := setup(mgr, args); err != nil {
return err
}
}
case "minimal":
for _, setup := range []func(ctrl.Manager, controller.Args) error{
application.Setup, traitdefinition.Setup, componentdefinition.Setup, policydefinition.Setup, workflowstepdefinition.Setup,
} {
if err := setup(mgr, args); err != nil {
return err
}
}
case "v0.3":
for _, setup := range []func(ctrl.Manager, controller.Args) error{
application.Setup, traitdefinition.Setup, componentdefinition.Setup, policydefinition.Setup, workflowstepdefinition.Setup,
} {
if err := setup(mgr, args); err != nil {
return err
}
for _, setup := range []func(ctrl.Manager, controller.Args) error{
application.Setup, traitdefinition.Setup, componentdefinition.Setup, policydefinition.Setup, workflowstepdefinition.Setup,
} {
if err := setup(mgr, args); err != nil {
return err
}
}
return nil

View File

@ -17,25 +17,14 @@ limitations under the License.
package utils
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/mitchellh/hashstructure/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubevela/workflow/pkg/cue/packages"
commontypes "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
@ -88,97 +77,3 @@ func ComputeSpecHash(spec interface{}) (string, error) {
specHashLabel := strconv.FormatUint(specHash, 16)
return specHashLabel, nil
}
// RefreshPackageDiscover help refresh package discover
// Deprecated: The function RefreshKubePackagesFromCluster affects performance and its code has been commented out for a long time.
func RefreshPackageDiscover(ctx context.Context, k8sClient client.Client, pd *packages.PackageDiscover, definition runtime.Object) error {
var gvk metav1.GroupVersionKind
var err error
switch def := definition.(type) {
case *v1beta1.ComponentDefinition:
if def.Spec.Workload.Definition == (commontypes.WorkloadGVK{}) {
workloadDef := new(v1beta1.WorkloadDefinition)
err = k8sClient.Get(ctx, client.ObjectKey{Name: def.Spec.Workload.Type, Namespace: def.Namespace}, workloadDef)
if err != nil {
return err
}
gvk, err = util.GetGVKFromDefinition(k8sClient.RESTMapper(), workloadDef.Spec.Reference)
if err != nil {
return err
}
} else {
gv, err := schema.ParseGroupVersion(def.Spec.Workload.Definition.APIVersion)
if err != nil {
return err
}
gvk = metav1.GroupVersionKind{
Group: gv.Group,
Version: gv.Version,
Kind: def.Spec.Workload.Definition.Kind,
}
}
case *v1beta1.TraitDefinition:
gvk, err = util.GetGVKFromDefinition(k8sClient.RESTMapper(), def.Spec.Reference)
if err != nil {
return err
}
case *v1beta1.PolicyDefinition:
gvk, err = util.GetGVKFromDefinition(k8sClient.RESTMapper(), def.Spec.Reference)
if err != nil {
return err
}
case *v1beta1.WorkflowStepDefinition:
gvk, err = util.GetGVKFromDefinition(k8sClient.RESTMapper(), def.Spec.Reference)
if err != nil {
return err
}
default:
}
targetGVK := metav1.GroupVersionKind{
Group: gvk.Group,
Version: gvk.Version,
Kind: gvk.Kind,
}
if exist := pd.Exist(targetGVK); exist {
return nil
}
if err := pd.RefreshKubePackagesFromCluster(); err != nil {
return err
}
// Test whether the refresh is successful
// if exist := pd.Exist(targetGVK); !exist {
// return fmt.Errorf("get CRD %s error", targetGVK.String())
// }
return nil
}
// GetUnstructuredObjectStatusCondition returns the status.condition with matching condType from an unstructured object.
func GetUnstructuredObjectStatusCondition(obj *unstructured.Unstructured, condType string) (*condition.Condition, bool, error) {
cs, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions")
if err != nil {
return nil, false, err
}
if !found {
return nil, false, nil
}
for _, c := range cs {
b, err := json.Marshal(c)
if err != nil {
return nil, false, err
}
condObj := &condition.Condition{}
err = json.Unmarshal(b, condObj)
if err != nil {
return nil, false, err
}
if string(condObj.Type) != condType {
continue
}
return condObj, true, nil
}
return nil, false, nil
}

View File

@ -18,15 +18,12 @@ package utils
import (
"fmt"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)
func TestConstructExtract(t *testing.T) {
@ -74,26 +71,4 @@ func TestGetAppRevison(t *testing.T) {
revisionName, latestRevision = GetAppNextRevision(app)
assert.Equal(t, revisionName, "myapp-v2")
assert.Equal(t, latestRevision, int64(2))
// we generate new revisions if the app is rolling
app.SetAnnotations(map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)})
revisionName, latestRevision = GetAppNextRevision(app)
assert.Equal(t, revisionName, "myapp-v2")
assert.Equal(t, latestRevision, int64(2))
app.Status.LatestRevision = &common.Revision{
Name: revisionName,
Revision: latestRevision,
}
// try again
revisionName, latestRevision = GetAppNextRevision(app)
assert.Equal(t, revisionName, "myapp-v3")
assert.Equal(t, latestRevision, int64(3))
app.Status.LatestRevision = &common.Revision{
Name: revisionName,
Revision: latestRevision,
}
// remove the annotation and it will still advance
oamutil.RemoveAnnotations(app, []string{oam.AnnotationAppRollout})
revisionName, latestRevision = GetAppNextRevision(app)
assert.Equal(t, revisionName, "myapp-v4")
assert.Equal(t, latestRevision, int64(4))
}
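With the rollout annotation gone from this test, revision advancement depends only on Status.LatestRevision. A short illustrative sequence using GetAppNextRevision as exercised above; the result of the first call on a fresh app is the editor's assumption, the second mirrors the assertions in the test, and the metav1/common imports are assumed.
// Illustrative only: how the next revision advances without any rollout
// annotation involved, assuming GetAppNextRevision takes the *Application
// used in the test above.
app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "myapp"}}
name, rev := GetAppNextRevision(app) // assumed "myapp-v1", 1 for a fresh app
app.Status.LatestRevision = &common.Revision{Name: name, Revision: rev}
name, rev = GetAppNextRevision(app) // "myapp-v2", 2, as asserted in the test above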

View File

@ -1,15 +0,0 @@
outputs: rollout: {
apiVersion: "extend.oam.dev/v1alpha2"
kind: "SimpleRolloutTrait"
spec: {
replica: parameter.replica
maxUnavailable: parameter.maxUnavailable
batch: parameter.batch
}
}
parameter: {
replica: *3 | int
maxUnavailable: *1 | int
batch: *2 | int
}

View File

@ -68,7 +68,6 @@ const (
policyDefType = "policy"
workflowStepDefType = "workflow-step"
workloadDefType = "workload"
scopeDefType = "scope"
)
var (
@ -80,7 +79,6 @@ var (
traitDefType: v1beta1.TraitDefinitionKind,
policyDefType: v1beta1.PolicyDefinitionKind,
workloadDefType: v1beta1.WorkloadDefinitionKind,
scopeDefType: v1beta1.ScopeDefinitionKind,
workflowStepDefType: v1beta1.WorkflowStepDefinitionKind,
}
// StringToDefinitionType converts user input to DefinitionType used in DefinitionRevisions
@ -107,7 +105,6 @@ var (
v1beta1.TraitDefinitionKind: traitDefType,
v1beta1.PolicyDefinitionKind: policyDefType,
v1beta1.WorkloadDefinitionKind: workloadDefType,
v1beta1.ScopeDefinitionKind: scopeDefType,
v1beta1.WorkflowStepDefinitionKind: workflowStepDefType,
}
)

View File

@ -1,44 +0,0 @@
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/component-base/featuregate"
)
var (
// APIServerMutableFeatureGate is a mutable version of APIServerFeatureGate
APIServerMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
// APIServerFeatureGate is a shared global FeatureGate for apiserver.
APIServerFeatureGate featuregate.FeatureGate = APIServerMutableFeatureGate
)
const (
// APIServerEnableImpersonation whether to enable impersonation for APIServer
APIServerEnableImpersonation featuregate.Feature = "EnableImpersonation"
// APIServerEnableAdminImpersonation whether to disable User admin impersonation for APIServer
APIServerEnableAdminImpersonation featuregate.Feature = "EnableAdminImpersonation"
)
func init() {
runtime.Must(APIServerMutableFeatureGate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
APIServerEnableImpersonation: {Default: false, PreRelease: featuregate.Alpha},
APIServerEnableAdminImpersonation: {Default: true, PreRelease: featuregate.Alpha},
}))
}

View File

@ -33,8 +33,6 @@ const (
DeprecatedObjectLabelSelector featuregate.Feature = "DeprecatedObjectLabelSelector"
// LegacyResourceTrackerGC enable the gc of legacy resource tracker in managed clusters
LegacyResourceTrackerGC featuregate.Feature = "LegacyResourceTrackerGC"
// LegacyComponentRevision if enabled, create component revision even no rollout trait attached
LegacyComponentRevision featuregate.Feature = "LegacyComponentRevision"
// LegacyResourceOwnerValidation if enabled, the resource dispatch will allow existing resource not to have owner
// application and the current application will take over it
LegacyResourceOwnerValidation featuregate.Feature = "LegacyResourceOwnerValidation"
@ -120,7 +118,6 @@ var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
LegacyObjectTypeIdentifier: {Default: false, PreRelease: featuregate.Alpha},
DeprecatedObjectLabelSelector: {Default: false, PreRelease: featuregate.Alpha},
LegacyResourceTrackerGC: {Default: false, PreRelease: featuregate.Beta},
LegacyComponentRevision: {Default: false, PreRelease: featuregate.Alpha},
LegacyResourceOwnerValidation: {Default: false, PreRelease: featuregate.Alpha},
DisableReferObjectsFromURL: {Default: false, PreRelease: featuregate.Alpha},
ApplyResourceByReplace: {Default: false, PreRelease: featuregate.Alpha},
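LegacyComponentRevision disappears from the gate table, but the remaining gates keep the same toggle pattern the deleted suite line used. A hedged snippet for a test, reusing utilfeature.DefaultMutableFeatureGate and a gate that survives this change (LegacyResourceTrackerGC); the surrounding test helper is illustrative.
// Illustrative test snippet: enabling one of the remaining gates follows the
// same pattern the removed LegacyComponentRevision line used. Assumes the
// utilfeature and features imports shown elsewhere in this commit.
func enableLegacyResourceTrackerGC(t *testing.T) {
	if err := utilfeature.DefaultMutableFeatureGate.Set(
		fmt.Sprintf("%s=true", features.LegacyResourceTrackerGC)); err != nil {
		t.Fatalf("failed to enable feature gate: %v", err)
	}
}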

View File

@ -130,9 +130,6 @@ const (
)
const (
// AnnotationAppGeneration records the generation of AppConfig
AnnotationAppGeneration = "app.oam.dev/generation"
// AnnotationLastAppliedConfig records the previous configuration of a
// resource for use in a three-way diff during a patching apply
AnnotationLastAppliedConfig = "app.oam.dev/last-applied-configuration"
@ -140,19 +137,10 @@ const (
// AnnotationLastAppliedTime indicates the last applied time
AnnotationLastAppliedTime = "app.oam.dev/last-applied-time"
// AnnotationAppRollout indicates that the application is still rolling out
// the application controller should treat it differently
AnnotationAppRollout = "app.oam.dev/rollout-template"
// AnnotationInplaceUpgrade indicates the workload should upgrade with the same name
// the name of the workload instance should not change along with the revision
AnnotationInplaceUpgrade = "app.oam.dev/inplace-upgrade"
// AnnotationRollingComponent indicates that the component is rolling out
// this is to enable any concerned controllers to handle the first component apply logic differently
// the value of the annotation is a list of the component names of all the new components
AnnotationRollingComponent = "app.oam.dev/rolling-components"
// AnnotationAppRevision indicates that the object is an application revision
// its controller should not try to reconcile it
AnnotationAppRevision = "app.oam.dev/app-revision"

View File

@ -28,9 +28,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
// ScopeKind contains the type metadata for a kind of an OAM scope resource.
type ScopeKind schema.GroupVersionKind
// TraitKind contains the type metadata for a kind of an OAM trait resource.
type TraitKind schema.GroupVersionKind
@ -76,14 +73,6 @@ type Trait interface {
WorkloadReferencer
}
// A Scope is a type of OAM scope.
type Scope interface {
Object
Conditioned
WorkloadsReferencer
}
// A Workload is a type of OAM workload.
type Workload interface {
Object

View File

@ -56,27 +56,6 @@ func TestUnstructured(t *testing.T) {
resource: "deployments",
exp: "deployments.apps",
},
"extended resource": {
u: &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": "extend.oam.dev/v1beta1",
"kind": "SimpleRolloutTrait",
}},
resource: "simplerollouttraits",
exp: "simplerollouttraits.extend.oam.dev",
},
"trait": {
u: &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": "extend.oam.dev/v1beta1",
"kind": "SimpleRolloutTrait",
"metadata": map[string]interface{}{
"labels": map[string]interface{}{
oam.TraitTypeLabel: "rollout",
},
},
}},
typeLabel: oam.TraitTypeLabel,
exp: "rollout",
},
"workload": {
u: &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": "apps/v1",
@ -868,109 +847,6 @@ func TestGetDefinition(t *testing.T) {
assert.Equal(t, &appTraitDefinition, appTd)
}
func TestGetScopeDefinition(t *testing.T) {
ctx := context.Background()
namespace := "vela-app"
ctx = util.SetNamespaceInCtx(ctx, namespace)
scopeDefinitionKind := "ScopeDefinition"
mockVerision := "core.oam.dev/v1beta1"
scopeDefinitionName := "mockscopes.core.oam.dev"
scopeDefinitionRefName := "mockscopes.core.oam.dev"
scopeDefinitionWorkloadRefsPath := "spec.workloadRefs"
sysScopeDefinition := v1beta1.ScopeDefinition{
TypeMeta: metav1.TypeMeta{
Kind: scopeDefinitionKind,
APIVersion: mockVerision,
},
ObjectMeta: metav1.ObjectMeta{
Name: scopeDefinitionName,
Namespace: "vela-system",
},
Spec: v1beta1.ScopeDefinitionSpec{
Reference: common.DefinitionReference{
Name: scopeDefinitionRefName,
},
WorkloadRefsPath: scopeDefinitionWorkloadRefsPath,
AllowComponentOverlap: false,
},
}
appScopeDefinition := v1beta1.ScopeDefinition{
TypeMeta: metav1.TypeMeta{
Kind: scopeDefinitionKind,
APIVersion: mockVerision,
},
ObjectMeta: metav1.ObjectMeta{
Name: scopeDefinitionName,
Namespace: namespace,
},
Spec: v1beta1.ScopeDefinitionSpec{
Reference: common.DefinitionReference{
Name: scopeDefinitionRefName,
},
WorkloadRefsPath: scopeDefinitionWorkloadRefsPath,
AllowComponentOverlap: false,
},
}
type fields struct {
getFunc test.MockGetFn
}
type want struct {
spd *v1beta1.ScopeDefinition
err error
}
cases := map[string]struct {
fields fields
want want
}{
"app defintion will overlay system definition": {
fields: fields{
getFunc: func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
o := obj.(*v1beta1.ScopeDefinition)
if key.Namespace == "vela-system" {
*o = sysScopeDefinition
} else {
*o = appScopeDefinition
}
return nil
},
},
want: want{
spd: &appScopeDefinition,
err: nil,
},
},
"return system definition when cannot find in app ns": {
fields: fields{
getFunc: func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
if key.Namespace == "vela-system" {
o := obj.(*v1beta1.ScopeDefinition)
*o = sysScopeDefinition
return nil
}
return apierrors.NewNotFound(schema.GroupResource{Group: "core.oma.dev", Resource: "scopeDefinition"}, key.Name)
},
},
want: want{
spd: &sysScopeDefinition,
err: nil,
},
},
}
for name, tc := range cases {
tclient := test.MockClient{
MockGet: tc.fields.getFunc,
}
got := new(v1beta1.ScopeDefinition)
err := util.GetDefinition(ctx, &tclient, got, "mockdefinition")
t.Log(fmt.Sprint("Running test: ", name))
assert.Equal(t, tc.want.err, err)
assert.Equal(t, tc.want.spd, got)
}
}
func TestExtractRevisionNum(t *testing.T) {
testcases := []struct {
revName string

View File

@ -26,7 +26,6 @@ import (
. "github.com/onsi/gomega"
oamcore "github.com/oam-dev/kubevela/apis/core.oam.dev"
oamstd "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -69,7 +68,6 @@ var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
Expect(clientgoscheme.AddToScheme(testScheme)).Should(Succeed())
Expect(oamcore.AddToScheme(testScheme)).Should(Succeed())
Expect(oamstd.AddToScheme(testScheme)).Should(Succeed())
By("Setting up applicator")
rawClient, err = client.New(cfg, client.Options{Scheme: testScheme})

View File

@ -69,7 +69,6 @@ import (
oamcore "github.com/oam-dev/kubevela/apis/core.oam.dev"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
oamstandard "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
velacue "github.com/oam-dev/kubevela/pkg/cue"
"github.com/oam-dev/kubevela/pkg/cue/process"
@ -105,7 +104,6 @@ func init() {
_ = apiregistrationv1.AddToScheme(Scheme)
_ = crdv1.AddToScheme(Scheme)
_ = oamcore.AddToScheme(Scheme)
_ = oamstandard.AddToScheme(Scheme)
_ = istioclientv1beta1.AddToScheme(Scheme)
_ = certmanager.AddToScheme(Scheme)
_ = kruise.AddToScheme(Scheme)

View File

@ -1,154 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rollout
import (
"fmt"
"net/http"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
)
// DefaultRolloutBatches set the default values for a rollout batches
// This is called by the mutation webhooks and before the validators
func DefaultRolloutBatches(rollout *v1alpha1.RolloutPlan) {
if rollout.TargetSize != nil && rollout.NumBatches != nil && rollout.RolloutBatches == nil {
// create the rollout batch based on the total size and num batches if it's not set
// leave it for the validator to validate more if they are both set
numBatches := int(*rollout.NumBatches)
// create the batch array
rollout.RolloutBatches = make([]v1alpha1.RolloutBatch, numBatches)
FillRolloutBatches(rollout, int(*rollout.TargetSize), numBatches)
for i, batch := range rollout.RolloutBatches {
klog.InfoS("mutation webhook assigns rollout plan", "batch", i, "replica",
batch.Replicas.IntValue())
}
}
}
// DefaultRolloutPlan set the default values for a rollout plan
func DefaultRolloutPlan(rollout *v1alpha1.RolloutPlan) {
if len(rollout.RolloutStrategy) == 0 {
rollout.RolloutStrategy = v1alpha1.IncreaseFirstRolloutStrategyType
}
}
// FillRolloutBatches fills the replicas in each batch depends on the total size and number of batches
func FillRolloutBatches(rollout *v1alpha1.RolloutPlan, totalSize int, numBatches int) {
total := totalSize
for total > 0 {
for i := numBatches - 1; i >= 0 && total > 0; i-- {
replica := rollout.RolloutBatches[i].Replicas.IntValue() + 1
rollout.RolloutBatches[i].Replicas = intstr.FromInt(replica)
total--
}
}
}
// ValidateCreate validate the rollout plan
func ValidateCreate(client client.Client, rollout *v1alpha1.RolloutPlan, rootPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if rollout.RolloutBatches == nil {
allErrs = append(allErrs, field.Required(rootPath.Child("rolloutBatches"), "the rollout has to have batches"))
}
// the rollout batch partition is either automatic or positive
if rollout.BatchPartition != nil && *rollout.BatchPartition < 0 {
allErrs = append(allErrs, field.Invalid(rootPath.Child("batchPartition"), rollout.BatchPartition,
"the rollout plan has to be positive"))
}
// NumBatches has to be the size of RolloutBatches
if rollout.NumBatches != nil && len(rollout.RolloutBatches) != int(*rollout.NumBatches) {
allErrs = append(allErrs, field.Invalid(rootPath.Child("numBatches"), rollout.NumBatches,
"the num batches does not match the rollout batch size"))
}
if rollout.RolloutStrategy != v1alpha1.IncreaseFirstRolloutStrategyType &&
rollout.RolloutStrategy != v1alpha1.DecreaseFirstRolloutStrategyType {
allErrs = append(allErrs, field.Invalid(rootPath.Child("rolloutStrategy"),
rollout.RolloutStrategy, "the rolloutStrategy can only be IncreaseFirst or DecreaseFirst"))
}
// validate the webhooks
allErrs = append(allErrs, validateWebhook(rollout, rootPath)...)
// validate the rollout batches
allErrs = append(allErrs, validateRolloutBatches(rollout, rootPath)...)
// TODO: The total number of num in the batches match the current target resource pod size
return allErrs
}
func validateWebhook(rollout *v1alpha1.RolloutPlan, rootPath *field.Path) (allErrs field.ErrorList) {
// The webhooks in the rollout plan can only be initialize or finalize webhooks
if rollout.RolloutWebhooks != nil {
webhookPath := rootPath.Child("rolloutWebhooks")
for i, rw := range rollout.RolloutWebhooks {
if rw.Type != v1alpha1.InitializeRolloutHook && rw.Type != v1alpha1.FinalizeRolloutHook {
allErrs = append(allErrs, field.Invalid(webhookPath.Index(i),
rw.Type, "the rollout webhook type can only be initialize or finalize webhook"))
}
// TODO: check the URL/name uniqueness?
if rw.Method != http.MethodPost && rw.Method != http.MethodGet && rw.Method != http.MethodPut {
allErrs = append(allErrs, field.Invalid(webhookPath.Index(i),
rw.Method, "the rollout webhook method can only be Get/PUT/POST"))
}
}
}
// The webhooks in the rollout batch can only be pre or post batch types
if rollout.RolloutBatches != nil {
batchesPath := rootPath.Child("rolloutBatches")
for i, rb := range rollout.RolloutBatches {
rolloutBatchPath := batchesPath.Index(i)
for j, brw := range rb.BatchRolloutWebhooks {
if brw.Type != v1alpha1.PostBatchRolloutHook && brw.Type != v1alpha1.PreBatchRolloutHook {
allErrs = append(allErrs, field.Invalid(rolloutBatchPath.Child("batchRolloutWebhooks").Index(j),
brw.Type, "the batch webhook type can only be pre or post batch webhook"))
}
// TODO: check the URL/name uniqueness?
}
}
}
return allErrs
}
func validateRolloutBatches(rollout *v1alpha1.RolloutPlan, rootPath *field.Path) (allErrs field.ErrorList) {
if rollout.RolloutBatches != nil {
batchesPath := rootPath.Child("rolloutBatches")
for i, rb := range rollout.RolloutBatches {
rolloutBatchPath := batchesPath.Index(i)
// validate rb.Replicas with a common total number
value, err := intstr.GetValueFromIntOrPercent(&rb.Replicas, 100, true)
if err != nil {
allErrs = append(allErrs, field.Invalid(rolloutBatchPath.Child("replicas"),
rb.Replicas, fmt.Sprintf("invalid replica value, err = %s", err)))
} else if value < 0 {
allErrs = append(allErrs, field.Invalid(rolloutBatchPath.Child("replicas"),
value, "negative replica value"))
}
}
}
return allErrs
}
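A minimal sketch of the batch-filling behaviour implemented by the deleted FillRolloutBatches above, using plain ints instead of the removed v1alpha1/intstr types (an illustration under that assumption, not the original API): any remainder always lands on the trailing batches.

```go
package main

import "fmt"

// fillBatches mirrors the deleted round-robin loop: walk the batches from the
// last one backwards, handing out one replica at a time until none are left.
func fillBatches(totalSize, numBatches int) []int {
	batches := make([]int, numBatches)
	for total := totalSize; total > 0; {
		for i := numBatches - 1; i >= 0 && total > 0; i-- {
			batches[i]++
			total--
		}
	}
	return batches
}

func main() {
	fmt.Println(fillBatches(8, 5)) // [1 1 2 2 2] -- matches the expectations in the tests in the next file
	fmt.Println(fillBatches(4, 5)) // [0 1 1 1 1]
}
```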

View File

@ -1,157 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rollout
import (
"testing"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
)
func TestDefaultRolloutPlan_EvenlyDivide(t *testing.T) {
var numBatch int32 = 5
rollout := &v1alpha1.RolloutPlan{
TargetSize: &numBatch,
NumBatches: &numBatch,
}
DefaultRolloutBatches(rollout)
if len(rollout.RolloutBatches) != int(numBatch) {
t.Errorf("number of batch %d does not equal to %d ", len(rollout.RolloutBatches), numBatch)
}
for i, batch := range rollout.RolloutBatches {
if batch.Replicas.IntVal != int32(1) {
t.Errorf("batch %d replica does not equal to 1", i)
}
}
}
func TestDefaultRolloutPlan_HasRemanence(t *testing.T) {
var numBatch int32 = 5
rollout := &v1alpha1.RolloutPlan{
TargetSize: pointer.Int32(8),
NumBatches: &numBatch,
}
DefaultRolloutBatches(rollout)
if len(rollout.RolloutBatches) != int(numBatch) {
t.Errorf("number of batch %d does not equal to %d ", len(rollout.RolloutBatches), numBatch)
}
if rollout.RolloutBatches[0].Replicas.IntValue() != 1 {
t.Errorf("batch 0's replica %d does not equal to 1", rollout.RolloutBatches[0].Replicas.IntValue())
}
if rollout.RolloutBatches[1].Replicas.IntValue() != 1 {
t.Errorf("batch 1's replica %d does not equal to 1", rollout.RolloutBatches[1].Replicas.IntValue())
}
if rollout.RolloutBatches[2].Replicas.IntValue() != 2 {
t.Errorf("batch 2's replica %d does not equal to 2", rollout.RolloutBatches[2].Replicas.IntValue())
}
if rollout.RolloutBatches[3].Replicas.IntValue() != 2 {
t.Errorf("batch 3's replica %d does not equal to 2", rollout.RolloutBatches[3].Replicas.IntValue())
}
if rollout.RolloutBatches[4].Replicas.IntValue() != 2 {
t.Errorf("batch 4's replica %d does not equal to 2", rollout.RolloutBatches[4].Replicas.IntValue())
}
}
func TestDefaultRolloutPlan_NotEnough(t *testing.T) {
var numBatch int32 = 5
rollout := &v1alpha1.RolloutPlan{
TargetSize: pointer.Int32(4),
NumBatches: &numBatch,
}
DefaultRolloutBatches(rollout)
if len(rollout.RolloutBatches) != int(numBatch) {
t.Errorf("number of batch %d does not equal to %d ", len(rollout.RolloutBatches), numBatch)
}
if rollout.RolloutBatches[0].Replicas.IntValue() != 0 {
t.Errorf("batch 0's replica %d does not equal to 0", rollout.RolloutBatches[0].Replicas.IntValue())
}
if rollout.RolloutBatches[1].Replicas.IntValue() != 1 {
t.Errorf("batch 1's replica %d does not equal to 1", rollout.RolloutBatches[1].Replicas.IntValue())
}
if rollout.RolloutBatches[2].Replicas.IntValue() != 1 {
t.Errorf("batch 2's replica %d does not equal to 1", rollout.RolloutBatches[2].Replicas.IntValue())
}
if rollout.RolloutBatches[3].Replicas.IntValue() != 1 {
t.Errorf("batch 3's replica %d does not equal to 1", rollout.RolloutBatches[3].Replicas.IntValue())
}
if rollout.RolloutBatches[4].Replicas.IntValue() != 1 {
t.Errorf("batch 4's replica %d does not equal to 1", rollout.RolloutBatches[4].Replicas.IntValue())
}
}
func TestFillRolloutBatches_WithPositiveOriginalSize(t *testing.T) {
var numBatch int32 = 4
rollout := &v1alpha1.RolloutPlan{
RolloutBatches: make([]v1alpha1.RolloutBatch, numBatch),
}
FillRolloutBatches(rollout, 9, 4)
if rollout.RolloutBatches[0].Replicas.IntValue() != 2 {
t.Errorf("batch 0's replica %d does not equal to 2", rollout.RolloutBatches[0].Replicas.IntValue())
}
if rollout.RolloutBatches[1].Replicas.IntValue() != 2 {
t.Errorf("batch 1's replica %d does not equal to 2", rollout.RolloutBatches[1].Replicas.IntValue())
}
if rollout.RolloutBatches[2].Replicas.IntValue() != 2 {
t.Errorf("batch 2's replica %d does not equal to 2", rollout.RolloutBatches[2].Replicas.IntValue())
}
if rollout.RolloutBatches[3].Replicas.IntValue() != 3 {
t.Errorf("batch 3's replica %d does not equal to 3", rollout.RolloutBatches[3].Replicas.IntValue())
}
}
func TestValidateIllegalReplicas(t *testing.T) {
illegalReplica := &v1alpha1.RolloutPlan{
RolloutBatches: []v1alpha1.RolloutBatch{
{
Replicas: intstr.FromString("0.2"),
},
},
}
if errList := validateRolloutBatches(illegalReplica, field.NewPath("spec")); len(errList) != 1 {
t.Error("should invalidate illegal replica value")
}
illegalReplica = &v1alpha1.RolloutPlan{
RolloutBatches: []v1alpha1.RolloutBatch{
{
Replicas: intstr.FromString("ab"),
},
},
}
if errList := validateRolloutBatches(illegalReplica, field.NewPath("spec")); len(errList) != 1 {
t.Error("should invalidate illegal replica value")
}
// negative replica case
negativeReplica := &v1alpha1.RolloutPlan{
RolloutBatches: []v1alpha1.RolloutBatch{
{
Replicas: intstr.FromInt(-1),
},
},
}
if errList := validateRolloutBatches(negativeReplica, field.NewPath("spec")); len(errList) == 0 {
t.Error("should invalidate negative replica value")
}
}
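The replica validation deleted above leans on intstr parsing; a small self-contained sketch (assuming only k8s.io/apimachinery, which the deleted code already imported) of which values GetValueFromIntOrPercent accepts and why "0.2" and "ab" are rejected:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	candidates := []intstr.IntOrString{
		intstr.FromInt(2),        // valid: plain integer replica count
		intstr.FromString("20%"), // valid: 20% of the assumed total of 100 -> 20
		intstr.FromString("0.2"), // rejected: neither an integer nor a percentage
		intstr.FromString("ab"),  // rejected: not parseable at all
		intstr.FromInt(-1),       // parses, but the validator flags negative values
	}
	for i := range candidates {
		v, err := intstr.GetValueFromIntOrPercent(&candidates[i], 100, true)
		fmt.Printf("%-4s -> value=%d err=%v\n", candidates[i].String(), v, err)
	}
}
```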

View File

@ -28,25 +28,11 @@ import (
// Register will be called in main and register all validation handlers
func Register(mgr manager.Manager, args controller.Args) {
switch args.OAMSpecVer {
case "all":
application.RegisterValidatingHandler(mgr, args)
componentdefinition.RegisterMutatingHandler(mgr, args)
componentdefinition.RegisterValidatingHandler(mgr)
traitdefinition.RegisterValidatingHandler(mgr, args)
case "minimal":
application.RegisterValidatingHandler(mgr, args)
componentdefinition.RegisterMutatingHandler(mgr, args)
componentdefinition.RegisterValidatingHandler(mgr)
traitdefinition.RegisterValidatingHandler(mgr, args)
case "v0.3":
application.RegisterValidatingHandler(mgr, args)
application.RegisterMutatingHandler(mgr)
componentdefinition.RegisterMutatingHandler(mgr, args)
componentdefinition.RegisterValidatingHandler(mgr)
traitdefinition.RegisterValidatingHandler(mgr, args)
}
application.RegisterValidatingHandler(mgr, args)
application.RegisterMutatingHandler(mgr)
componentdefinition.RegisterMutatingHandler(mgr, args)
componentdefinition.RegisterValidatingHandler(mgr)
traitdefinition.RegisterValidatingHandler(mgr, args)
server := mgr.GetWebhookServer()
server.Register("/convert", &conversion.Webhook{})
}
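Since the comment above notes that Register is called from main, here is a hedged sketch (not the repo's actual entrypoint; the oamwebhook import alias and the args value are assumptions) of how such a registration is typically wired into a controller-runtime manager:

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Build a manager; webhook server options (certs, port) are omitted here.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}
	// oamwebhook.Register(mgr, args) // hypothetical import of the package above;
	// it registers the validating/mutating handlers and the /convert endpoint shown.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```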

View File

@ -110,25 +110,6 @@ var _ = Describe("Test Application Validator", func() {
Expect(resp.Allowed).Should(BeFalse())
})
It("Test Application Validator rollout-template annotation [error]", func() {
req := admission.Request{
AdmissionRequest: admissionv1.AdmissionRequest{
Operation: admissionv1.Create,
Resource: metav1.GroupVersionResource{Group: "core.oam.dev", Version: "v1alpha2", Resource: "applications"},
Object: runtime.RawExtension{
Raw: []byte(`
{"apiVersion":"core.oam.dev/v1beta1","kind":"Application",
"metadata":{"name":"application-sample","annotations":{"app.oam.dev/rollout-template":"false"}},
"spec":{"components":[{"type":"worker","properties":{"cmd":["sleep","1000"],"image":"busybox"},
"traits":[{"type":"scaler","properties":{"replicas":10}}]}]}}
`),
},
},
}
resp := handler.Handle(ctx, req)
Expect(resp.Allowed).Should(BeFalse())
})
It("Test Application Validator workflow step name duplicate [error]", func() {
By("test duplicated step name in workflow")
req := admission.Request{

View File

@ -23,8 +23,6 @@ import (
"github.com/kubevela/pkg/controller/sharding"
"github.com/kubevela/pkg/util/singleton"
appsv1 "k8s.io/api/apps/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -32,7 +30,6 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/features"
"github.com/oam-dev/kubevela/pkg/oam"
)
// ValidateWorkflow validates the Application workflow
@ -107,10 +104,6 @@ func (h *ValidatingHandler) ValidateComponents(ctx context.Context, app *v1beta1
if err := appParser.ValidateCUESchematicAppfile(af); err != nil {
componentErrs = append(componentErrs, field.Invalid(field.NewPath("schematic"), app, err.Error()))
}
if v := app.GetAnnotations()[oam.AnnotationAppRollout]; len(v) != 0 && v != "true" {
componentErrs = append(componentErrs, field.Invalid(field.NewPath("annotation:app.oam.dev/rollout-template"), app, "the annotation value of rollout-template must be true"))
}
componentErrs = append(componentErrs, h.validateExternalRevisionName(ctx, app)...)
return componentErrs
}
@ -130,29 +123,3 @@ func (h *ValidatingHandler) ValidateUpdate(ctx context.Context, newApp, oldApp *
// TODO: add more validating
return errs
}
func (h *ValidatingHandler) validateExternalRevisionName(ctx context.Context, app *v1beta1.Application) field.ErrorList {
var componentErrs field.ErrorList
for index, comp := range app.Spec.Components {
if len(comp.ExternalRevision) == 0 {
continue
}
revisionName := comp.ExternalRevision
cr := &appsv1.ControllerRevision{}
if err := h.Client.Get(ctx, client.ObjectKey{Namespace: app.Namespace, Name: revisionName}, cr); err != nil {
if !apierrors.IsNotFound(err) {
componentErrs = append(componentErrs, field.Invalid(field.NewPath(fmt.Sprintf("components[%d].externalRevision", index)), app, err.Error()))
}
continue
}
labeledControllerComponent := cr.GetLabels()[oam.LabelControllerRevisionComponent]
if labeledControllerComponent != comp.Name {
componentErrs = append(componentErrs, field.Invalid(field.NewPath(fmt.Sprintf("components[%d].externalRevision", index)), app, fmt.Sprintf("label:%s for revision:%s should be equal with component name", oam.LabelControllerRevisionComponent, revisionName)))
continue
}
}
return componentErrs
}

View File

@ -226,7 +226,7 @@ func startReferenceDocsSite(ctx context.Context, ns string, c common.Args, ioStr
return nil
}
if capabilityType != types.TypeWorkload && capabilityType != types.TypeTrait && capabilityType != types.TypeScope &&
if capabilityType != types.TypeWorkload && capabilityType != types.TypeTrait &&
capabilityType != types.TypeComponentDefinition && capabilityType != types.TypeWorkflowStep && capabilityType != "" {
return fmt.Errorf("unsupported type: %v", capabilityType)
}
@ -439,7 +439,6 @@ func getDefinitions(capabilities []types.Capability) ([]string, []string, []stri
workflowSteps = append(workflowSteps, c.Name)
case types.TypePolicy:
policies = append(policies, c.Name)
case types.TypeScope:
case types.TypeWorkload:
default:
}

View File

@ -152,7 +152,6 @@ func TestGetWorkloadAndTraits(t *testing.T) {
var (
workloadName = "component1"
traitName = "trait1"
scopeName = "scope1"
policyName = "policy1"
)
@ -178,19 +177,6 @@ func TestGetWorkloadAndTraits(t *testing.T) {
traits: []string{traitName},
},
},
"ScopeTypeCapability": {
reason: "invalid capabilities",
capabilities: []types.Capability{
{
Name: scopeName,
Type: types.TypeScope,
},
},
want: want{
workloads: nil,
traits: nil,
},
},
"PolicyTypeCapability": {
capabilities: []types.Capability{
{

View File

@ -1,23 +0,0 @@
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app-rollout
namespace: default
spec:
components:
- name: hello-world-server
type: webservice
properties:
image: crccheck/hello-world
ports:
- port: 8000
expose: true
type: webservice
policies:
- name: health-policy-demo
type: health
properties:
probeInterval: 5
probeTimeout: 10
```

View File

@ -81,7 +81,6 @@ func LoadInstalledCapabilityWithType(userNamespace string, c common.Args, capT t
caps = append(caps, systemCaps...)
}
return caps, nil
case types.TypeScope:
case types.TypeWorkload:
default:
}

View File

@ -76,7 +76,6 @@ func TestCreateMarkdown(t *testing.T) {
workloadName := "workload1"
traitName := "trait1"
scopeName := "scope1"
workloadName2 := "workload2"
workloadCueTemplate := `
@ -146,17 +145,6 @@ variable "acl" {
},
want: nil,
},
"ScopeTypeCapability": {
reason: "invalid capabilities",
ref: ref,
capabilities: []types.Capability{
{
Name: scopeName,
Type: types.TypeScope,
},
},
want: fmt.Errorf("type(scope) of the capability(scope1) is not supported for now"),
},
"TerraformCapabilityInChinese": {
reason: "terraform capability",
ref: refZh,

View File

@ -33,7 +33,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log/zap"
core "github.com/oam-dev/kubevela/apis/core.oam.dev"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
// +kubebuilder:scaffold:imports
)
@ -56,8 +55,6 @@ var _ = BeforeSuite(func() {
Expect(err).Should(BeNil())
err = crdv1.AddToScheme(scheme)
Expect(err).Should(BeNil())
err = v1alpha1.AddToScheme(scheme)
Expect(err).Should(BeNil())
err = terraformv1beta1.AddToScheme(scheme)
Expect(err).Should(BeNil())
By("Setting up kubernetes client")

View File

@ -1,136 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_multicluster_test
import (
"context"
"fmt"
"os"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/oam/util"
"sigs.k8s.io/yaml"
)
var _ = PDescribe("Test MultiCluster Rollout", func() {
Context("Test Runtime Cluster Rollout", func() {
var namespace string
var hubCtx context.Context
var workerCtx context.Context
var rollout v1alpha1.Rollout
var componentName string
var targetDeploy appsv1.Deployment
var sourceDeploy appsv1.Deployment
BeforeEach(func() {
hubCtx, workerCtx, namespace = initializeContextAndNamespace()
componentName = "hello-world-server"
})
AfterEach(func() {
cleanUpNamespace(hubCtx, workerCtx, namespace)
ns := v1.Namespace{}
Eventually(func() error { return k8sClient.Get(hubCtx, types.NamespacedName{Name: namespace}, &ns) }, 300*time.Second).Should(util.NotFoundMatcher{})
})
verifySucceed := func(componentRevision string) {
By("check rollout status have succeed")
Eventually(func() error {
rolloutKey := types.NamespacedName{Namespace: namespace, Name: componentName}
if err := k8sClient.Get(workerCtx, rolloutKey, &rollout); err != nil {
return err
}
if rollout.Spec.TargetRevisionName != componentRevision {
return fmt.Errorf("rollout have not point to right targetRevision")
}
if rollout.Status.RollingState != v1alpha1.RolloutSucceedState {
return fmt.Errorf("error rollout status state %s", rollout.Status.RollingState)
}
compRevName := rollout.Spec.TargetRevisionName
deployKey := types.NamespacedName{Namespace: namespace, Name: compRevName}
if err := k8sClient.Get(workerCtx, deployKey, &targetDeploy); err != nil {
return err
}
if *targetDeploy.Spec.Replicas != *rollout.Spec.RolloutPlan.TargetSize {
return fmt.Errorf("targetDeploy replicas missMatch %d, %d", targetDeploy.Spec.Replicas, rollout.Spec.RolloutPlan.TargetSize)
}
if targetDeploy.Status.UpdatedReplicas != *targetDeploy.Spec.Replicas {
return fmt.Errorf("update not finish")
}
if rollout.Status.LastSourceRevision == "" {
return nil
}
deployKey = types.NamespacedName{Namespace: namespace, Name: rollout.Status.LastSourceRevision}
if err := k8sClient.Get(workerCtx, deployKey, &sourceDeploy); err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("source deploy still exist")
}
return nil
}, time.Second*60).Should(BeNil())
}
It("Test Rollout whole feature in runtime cluster ", func() {
app := &v1beta1.Application{}
appYaml, err := os.ReadFile("./testdata/app/app-rollout-envbinding.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
app.SetNamespace(namespace)
err = k8sClient.Create(hubCtx, app)
Expect(err).Should(Succeed())
verifySucceed(componentName + "-v1")
By("update application to v2")
checkApp := &v1beta1.Application{}
Eventually(func() error {
if err := k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image": "stefanprodan/podinfo:5.0.2"}`)
if err := k8sClient.Update(hubCtx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second).Should(BeNil())
verifySucceed(componentName + "-v2")
By("revert to v1, should guarantee compRev v1 still exist")
appYaml, err = os.ReadFile("./testdata/app/revert-app-envbinding.yaml")
Expect(err).Should(Succeed())
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, checkApp)).Should(BeNil())
revertApp := &v1beta1.Application{}
Expect(yaml.Unmarshal([]byte(appYaml), revertApp)).Should(Succeed())
revertApp.SetNamespace(namespace)
revertApp.SetResourceVersion(checkApp.ResourceVersion)
Eventually(func() error {
if err := k8sClient.Update(hubCtx, revertApp); err != nil {
return err
}
return nil
}, 30*time.Second).Should(BeNil())
verifySucceed(componentName + "-v1")
})
})
})

View File

@ -1,37 +0,0 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app
namespace: default
spec:
components:
- name: hello-world-server
type: webservice
properties:
image: stefanprodan/podinfo:4.0.3
traits:
- type: rollout
properties:
targetSize: 2
rolloutBatches:
- replicas: 1
- replicas: 1
policies:
- name: example-multi-env-policy
type: env-binding
properties:
envs:
- name: staging
placement: # selecting the cluster to deploy to
clusterSelector:
name: cluster-worker
workflow:
steps:
# deploy to staging env
- name: deploy-staging
type: deploy2env
properties:
policy: example-multi-env-policy
env: staging

View File

@ -1,50 +0,0 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app-rollout
namespace: default
spec:
components:
- name: hello-world-server
type: webservice
properties:
image: crccheck/hello-world
ports:
- port: 8000
expose: true
type: webservice
traits:
- type: rollout
properties:
targetSize: 2
rolloutBatches:
- replicas: 1
- replicas: 1
policies:
- name: example-multi-env-policy
type: env-binding
properties:
envs:
- name: staging
placement: # select the cluster to deploy to and apply the default rollout strategy
clusterSelector:
name: cluster-worker
- name: health-policy-demo
type: health
properties:
probeInterval: 5
probeTimeout: 10
workflow:
steps:
# deploy to the staging environment
- name: deploy-staging
type: deploy2env
properties:
policy: example-multi-env-policy
env: staging

View File

@ -1,38 +0,0 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app
namespace: default
spec:
components:
- name: hello-world-server
type: webservice
properties:
image: stefanprodan/podinfo:5.0.2
traits:
- type: rollout
properties:
targetRevision: hello-world-server-v1
targetSize: 2
rolloutBatches:
- replicas: 1
- replicas: 1
policies:
- name: example-multi-env-policy
type: env-binding
properties:
envs:
- name: staging
placement: # selecting the cluster to deploy to
clusterSelector:
name: cluster-worker
workflow:
steps:
# deploy to staging env
- name: deploy-staging
type: deploy2env
properties:
policy: example-multi-env-policy
env: staging

View File

@ -188,136 +188,6 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return nil
}, time.Second*10, time.Millisecond*500).Should(BeNil())
})
It("Test clean up rollout appRevision", func() {
appName := "app-2"
appKey := types.NamespacedName{Namespace: namespace, Name: appName}
app := getApp(appName, namespace, "normal-worker")
metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationAppRollout, "true")
metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationRollingComponent, "comp1")
Eventually(func() error {
err := k8sClient.Create(ctx, app)
return err
}, 15*time.Second, 300*time.Millisecond).Should(BeNil())
checkApp := new(v1beta1.Application)
for i := 0; i < appRevisionLimit; i++ {
Eventually(func() error {
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
if checkApp.Status.LatestRevision == nil || checkApp.Status.LatestRevision.Revision != int64(i+1) {
return fmt.Errorf("application point to wrong revision")
}
return nil
}, time.Second*30, time.Microsecond).Should(BeNil())
Eventually(func() error {
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
property := fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, i)
checkApp.Spec.Components[0].Properties = &runtime.RawExtension{Raw: []byte(property)}
if err := k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, time.Second*10, time.Millisecond*500).Should(BeNil())
}
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
oam.LabelAppName: appName,
},
}
appRevisionList := new(v1beta1.ApplicationRevisionList)
Eventually(func() error {
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
}
if len(appRevisionList.Items) != appRevisionLimit+1 {
return fmt.Errorf("error appRevison number wants %d, actually %d", appRevisionLimit+1, len(appRevisionList.Items))
}
return nil
}, time.Second*300, time.Microsecond*300).Should(BeNil())
By("create new appRevision will remove appRevison1")
property := fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, 5)
Eventually(func() error {
if err := k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Properties = &runtime.RawExtension{Raw: []byte(property)}
return k8sClient.Update(ctx, checkApp)
}, 15*time.Second, 500*time.Millisecond).Should(Succeed())
deletedRevison := new(v1beta1.ApplicationRevision)
revKey := types.NamespacedName{Namespace: namespace, Name: appName + "-v1"}
Eventually(func() error {
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
}
if len(appRevisionList.Items) != appRevisionLimit+1 {
return fmt.Errorf("error appRevison number wants %d, actually %d", appRevisionLimit, len(appRevisionList.Items))
}
err = k8sClient.Get(ctx, revKey, deletedRevison)
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("haven't clean up the oldest revision")
}
if res, err := util.CheckAppRevision(appRevisionList.Items, []int{2, 3, 4, 5, 6, 7}); err != nil || !res {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*10, time.Millisecond*500).Should(BeNil())
By("update app again will gc appRevision2")
property = fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, 6)
Eventually(func() error {
if err := k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Properties = &runtime.RawExtension{Raw: []byte(property)}
return k8sClient.Update(ctx, checkApp)
}, 15*time.Second, 500*time.Millisecond).Should(Succeed())
Eventually(func() error {
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
}
if len(appRevisionList.Items) != appRevisionLimit+1 {
return fmt.Errorf("error appRevison number wants %d, actually %d", appRevisionLimit, len(appRevisionList.Items))
}
revKey = types.NamespacedName{Namespace: namespace, Name: appName + "-v2"}
err = k8sClient.Get(ctx, revKey, deletedRevison)
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("haven't clean up the revision-2")
}
if res, err := util.CheckAppRevision(appRevisionList.Items, []int{3, 4, 5, 6, 7, 8}); err != nil || !res {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*10, time.Millisecond*500).Should(BeNil())
By("update app twice will gc appRevision4 not appRevision3")
for i := 7; i < 9; i++ {
Eventually(func() error {
if err := k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
if checkApp.Status.LatestRevision == nil || checkApp.Status.LatestRevision.Revision != int64(i+1) {
return fmt.Errorf("application point to wrong revision")
}
return nil
}, time.Second*10, time.Millisecond*500).Should(BeNil())
Eventually(func() error {
if err := k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
property = fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, i)
checkApp.Spec.Components[0].Properties = &runtime.RawExtension{Raw: []byte(property)}
if err := k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, time.Second*30, time.Microsecond).Should(BeNil())
}
})
})
var (

View File

@ -276,14 +276,6 @@ var _ = Describe("Application Normal tests", func() {
Expect(testApp.Status.Services[0].Traits[1].Message).Should(Equal("secret:app-env-config"))
})
It("Test app have rollout-template false annotation", func() {
By("Apply an application")
var newApp v1beta1.Application
Expect(common.ReadYamlToObject("testdata/app/app5.yaml", &newApp)).Should(BeNil())
newApp.Namespace = namespaceName
Expect(k8sClient.Create(ctx, &newApp)).ShouldNot(BeNil())
})
It("Test app have components with same name", func() {
By("Apply an application")
var newApp v1beta1.Application

View File

@ -1,390 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers_test
import (
"context"
"fmt"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/pkg/controller/utils"
appsv1 "k8s.io/api/apps/v1"
"github.com/oam-dev/kubevela/pkg/oam"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var _ = Describe("rollout related e2e-test,Cloneset component rollout tests", func() {
ctx := context.Background()
var namespaceName, componentName, rolloutName string
var ns corev1.Namespace
var rollout v1alpha1.Rollout
var kc kruise.CloneSet
createNamespace := func() {
ns = corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespaceName,
},
}
// delete the namespaceName with all its resources
Eventually(
func() error {
return k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))
},
time.Second*120, time.Millisecond*500).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
By("make sure all the resources are removed")
objectKey := client.ObjectKey{
Name: namespaceName,
}
res := &corev1.Namespace{}
Eventually(
func() error {
return k8sClient.Get(ctx, objectKey, res)
},
time.Second*120, time.Millisecond*500).Should(&util.NotFoundMatcher{})
Eventually(
func() error {
return k8sClient.Create(ctx, &ns)
},
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
}
verifyRolloutSucceeded := func(compRevName string) {
By("Wait for the rollout to succeed")
Eventually(
func() error {
rollout = v1alpha1.Rollout{}
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: rolloutName}, &rollout)
if err != nil {
return err
}
if rollout.Status.LastUpgradedTargetRevision != compRevName {
return fmt.Errorf("component revision name error %s", compRevName)
}
if rollout.Status.RollingState != v1alpha1.RolloutSucceedState {
return fmt.Errorf("rollout isn't succeed acctauly %s", rollout.Status.RollingState)
}
return nil
},
time.Second*300, 300*time.Millisecond).Should(BeNil())
Expect(rollout.Status.UpgradedReadyReplicas).Should(BeEquivalentTo(rollout.Status.RolloutTargetSize))
Expect(rollout.Status.UpgradedReplicas).Should(BeEquivalentTo(rollout.Status.RolloutTargetSize))
clonesetName := rollout.Spec.ComponentName
By("Wait for resourceTracker to resume the control of cloneset")
Eventually(
func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
if err != nil {
return err
}
if kc.Status.UpdatedReplicas != *kc.Spec.Replicas {
return fmt.Errorf("expect cloneset updated replicas %d, but got %d",
kc.Status.UpdatedReplicas, *kc.Spec.Replicas)
}
return nil
},
time.Second*60, time.Millisecond*500).Should(BeNil())
// make sure all pods are upgraded
image := kc.Spec.Template.Spec.Containers[0].Image
podList := corev1.PodList{}
Eventually(func() error {
if err := k8sClient.List(ctx, &podList, client.MatchingLabels(kc.Spec.Template.Labels),
client.InNamespace(namespaceName)); err != nil {
return err
}
if len(podList.Items) != int(*kc.Spec.Replicas) {
return fmt.Errorf("expect pod numbers %q, got %q", int(*kc.Spec.Replicas), len(podList.Items))
}
for _, pod := range podList.Items {
gotImage := pod.Spec.Containers[0].Image
if gotImage != image {
return fmt.Errorf("expect pod container image %q, got %q", image, gotImage)
}
if pod.Status.Phase != corev1.PodRunning {
return fmt.Errorf("expect pod phase %q, got %q", corev1.PodRunning, pod.Status.Phase)
}
}
return nil
}, 60*time.Second, 500*time.Millisecond).Should(Succeed())
}
initialScale := func() {
By("Apply the component scale to deploy")
var newRollout v1alpha1.Rollout
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/comp-rollout.yaml", &newRollout)).Should(BeNil())
newRollout.Namespace = namespaceName
compRevName := utils.ConstructRevisionName(componentName, 1)
newRollout.Spec.TargetRevisionName = compRevName
Expect(k8sClient.Create(ctx, &newRollout)).Should(BeNil())
rolloutName = newRollout.Name
verifyRolloutSucceeded(compRevName)
}
applyTwoComponentRevision := func() {
var compRev1, compRev2 appsv1.ControllerRevision
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/compRevSource.yaml", &compRev1)).Should(BeNil())
compRev1.SetNamespace(namespaceName)
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/compRevTarget.yaml", &compRev2)).Should(BeNil())
compRev2.SetNamespace(namespaceName)
Expect(k8sClient.Create(ctx, &compRev1)).Should(BeNil())
Expect(k8sClient.Create(ctx, &compRev2)).Should(BeNil())
}
BeforeEach(func() {
By("Start to run a test, clean up previous resources")
namespaceName = randomNamespaceName("comp-rollout-e2e-test")
createNamespace()
componentName = "metrics-provider"
})
AfterEach(func() {
By("Clean up resources after a test")
Eventually(func() error {
err := k8sClient.Delete(ctx, &ns)
if err == nil || apierrors.IsNotFound(err) {
return nil
}
return err
}, 15*time.Second, 300*time.Microsecond).Should(BeNil())
Eventually(func() error {
err := k8sClient.Delete(ctx, &rollout)
if err == nil || apierrors.IsNotFound(err) {
return nil
}
return err
}, 15*time.Second, 300*time.Microsecond).Should(BeNil())
By(fmt.Sprintf("Delete the entire namespaceName %s", ns.Name))
// delete the namespaceName with all its resources
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationBackground))).Should(BeNil())
})
It("Test component rollout cloneset", func() {
var err error
applyTwoComponentRevision()
By("verify generate two controller revisions")
ctlRevList := appsv1.ControllerRevisionList{}
Eventually(func() error {
if err = k8sClient.List(ctx, &ctlRevList, client.InNamespace(namespaceName),
client.MatchingLabels(map[string]string{oam.LabelControllerRevisionComponent: componentName})); err != nil {
return err
}
if len(ctlRevList.Items) < 2 {
return fmt.Errorf("component revision missmatch actually %d", len(ctlRevList.Items))
}
return nil
}, time.Second*30, 300*time.Millisecond).Should(BeNil())
By("initial scale component revision")
initialScale()
clonesetName := rollout.Spec.ComponentName
By("rollout to compRev 2")
Eventually(func() error {
checkRollout := new(v1alpha1.Rollout)
if err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: rolloutName}, checkRollout); err != nil {
return err
}
// we needn't specify sourceRevision, rollout use lastTarget as source
checkRollout.Spec.TargetRevisionName = utils.ConstructRevisionName(componentName, 2)
checkRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32(0)
if err = k8sClient.Update(ctx, checkRollout); err != nil {
return err
}
return nil
}, 30*time.Second, 15*time.Millisecond).Should(BeNil())
By("verify rollout pause in first batch")
checkRollout := new(v1alpha1.Rollout)
Eventually(func() error {
checkRollout = new(v1alpha1.Rollout)
if err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: rolloutName}, checkRollout); err != nil {
return err
}
if checkRollout.Status.LastUpgradedTargetRevision != utils.ConstructRevisionName(componentName, 2) {
return fmt.Errorf("last target error")
}
if checkRollout.Status.RollingState != v1alpha1.RollingInBatchesState {
return fmt.Errorf("rollout state error")
}
if checkRollout.Status.CurrentBatch != 0 {
return fmt.Errorf("current batch missmatch")
}
return nil
}, 60*time.Second, 300*time.Millisecond).Should(BeNil())
Eventually(
func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
if err != nil {
return err
}
if len(kc.OwnerReferences) == 0 {
return fmt.Errorf("cloneset owner missmatch")
}
if kc.OwnerReferences[0].UID != checkRollout.UID || kc.OwnerReferences[0].Kind != v1alpha1.RolloutKind {
return fmt.Errorf("cloneset owner missmatch not rollout Uid %s", checkRollout.UID)
}
if kc.Status.UpdatedReplicas != 3 {
return fmt.Errorf("expect cloneset updated replicas %d, but got %d",
3, *kc.Spec.Replicas)
}
return nil
},
time.Second*120, time.Millisecond*500).Should(BeNil())
Eventually(func() error {
checkRollout := new(v1alpha1.Rollout)
if err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: rolloutName}, checkRollout); err != nil {
return err
}
checkRollout.Spec.RolloutPlan.BatchPartition = nil
if err = k8sClient.Update(ctx, checkRollout); err != nil {
return err
}
return nil
}, 30*time.Second, 15*time.Millisecond).Should(BeNil())
verifyRolloutSucceeded(utils.ConstructRevisionName(componentName, 2))
By("continue rollout forward")
Eventually(func() error {
checkRollout := new(v1alpha1.Rollout)
if err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: rolloutName}, checkRollout); err != nil {
return err
}
// we needn't specify sourceRevision, rollout use lastTarget as source
checkRollout.Spec.TargetRevisionName = utils.ConstructRevisionName(componentName, 1)
if err = k8sClient.Update(ctx, checkRollout); err != nil {
return err
}
return nil
}, 30*time.Second, 15*time.Millisecond).Should(BeNil())
verifyRolloutSucceeded(utils.ConstructRevisionName(componentName, 1))
})
It("Test component rollout cloneset revert in middle of rollout", func() {
var err error
applyTwoComponentRevision()
By("verify generate two controller revisions")
ctlRevList := appsv1.ControllerRevisionList{}
Eventually(func() error {
if err = k8sClient.List(ctx, &ctlRevList, client.InNamespace(namespaceName),
client.MatchingLabels(map[string]string{oam.LabelControllerRevisionComponent: componentName})); err != nil {
return err
}
if len(ctlRevList.Items) < 2 {
return fmt.Errorf("component revision missmatch acctually %d", len(ctlRevList.Items))
}
return nil
}, time.Second*30, 300*time.Millisecond).Should(BeNil())
By("initial scale component revision")
initialScale()
clonesetName := rollout.Spec.ComponentName
By("rollout to compRev 2")
Eventually(func() error {
checkRollout := new(v1alpha1.Rollout)
if err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: rolloutName}, checkRollout); err != nil {
return err
}
// we needn't specify sourceRevision, rollout use lastTarget as source
checkRollout.Spec.TargetRevisionName = utils.ConstructRevisionName(componentName, 2)
checkRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32(0)
if err = k8sClient.Update(ctx, checkRollout); err != nil {
return err
}
return nil
}, 30*time.Second, 15*time.Millisecond).Should(BeNil())
By("verify rollout pause in first batch")
checkRollout := new(v1alpha1.Rollout)
Eventually(func() error {
checkRollout = new(v1alpha1.Rollout)
if err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: rolloutName}, checkRollout); err != nil {
return err
}
if checkRollout.Status.LastUpgradedTargetRevision != utils.ConstructRevisionName(componentName, 2) {
return fmt.Errorf("last target error")
}
if checkRollout.Status.RollingState != v1alpha1.RollingInBatchesState {
return fmt.Errorf("rollout state error")
}
if checkRollout.Status.CurrentBatch != 0 {
return fmt.Errorf("current batch missmatch")
}
return nil
}, 60*time.Second, 300*time.Millisecond).Should(BeNil())
Eventually(
func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
if err != nil {
return err
}
if len(kc.OwnerReferences) == 0 {
return fmt.Errorf("cloneset owner missmatch")
}
if kc.OwnerReferences[0].UID != checkRollout.UID || kc.OwnerReferences[0].Kind != v1alpha1.RolloutKind {
return fmt.Errorf("cloneset owner missmatch not rollout Uid %s", checkRollout.UID)
}
if kc.Status.UpdatedReplicas != 3 {
return fmt.Errorf("expect cloneset updated replicas %d, but got %d",
3, *kc.Spec.Replicas)
}
return nil
},
time.Second*120, time.Millisecond*500).Should(BeNil())
Eventually(func() error {
checkRollout := new(v1alpha1.Rollout)
if err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: rolloutName}, checkRollout); err != nil {
return err
}
checkRollout.Spec.TargetRevisionName = utils.ConstructRevisionName(componentName, 1)
checkRollout.Spec.RolloutPlan.BatchPartition = nil
if err = k8sClient.Update(ctx, checkRollout); err != nil {
return err
}
return nil
}, 30*time.Second, 15*time.Millisecond).Should(BeNil())
verifyRolloutSucceeded(utils.ConstructRevisionName(componentName, 1))
By("continue rollout forward")
Eventually(func() error {
checkRollout := new(v1alpha1.Rollout)
if err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: rolloutName}, checkRollout); err != nil {
return err
}
// we needn't specify sourceRevision, rollout use lastTarget as source
checkRollout.Spec.TargetRevisionName = utils.ConstructRevisionName(componentName, 2)
if err = k8sClient.Update(ctx, checkRollout); err != nil {
return err
}
return nil
}, 30*time.Second, 15*time.Millisecond).Should(BeNil())
verifyRolloutSucceeded(utils.ConstructRevisionName(componentName, 2))
})
})

View File

@ -1,420 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers_test
import (
"context"
"encoding/json"
"fmt"
"time"
v1 "k8s.io/api/apps/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
common2 "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)
var _ = Describe("rollout related e2e-test,rollout trait test", func() {
ctx := context.Background()
var namespaceName, componentName, compRevName string
var ns corev1.Namespace
var app v1beta1.Application
var rollout v1alpha1.Rollout
var targerDeploy, sourceDeploy v1.Deployment
var err error
createNamespace := func() {
ns = corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespaceName,
},
}
// delete the namespaceName with all its resources
Eventually(
func() error {
return k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))
},
time.Second*120, time.Millisecond*500).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
By("make sure all the resources are removed")
objectKey := client.ObjectKey{
Name: namespaceName,
}
res := &corev1.Namespace{}
Eventually(
func() error {
return k8sClient.Get(ctx, objectKey, res)
},
time.Second*120, time.Millisecond*500).Should(&util.NotFoundMatcher{})
Eventually(
func() error {
return k8sClient.Create(ctx, &ns)
},
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
}
verifySuccess := func(componentRevision string) {
By("check rollout status have succeed")
Eventually(func() error {
rolloutKey := types.NamespacedName{Namespace: namespaceName, Name: componentName}
rollout = v1alpha1.Rollout{}
if err := k8sClient.Get(ctx, rolloutKey, &rollout); err != nil {
return err
}
if rollout.Spec.TargetRevisionName != componentRevision {
return fmt.Errorf("rollout have not point to right targetRevision")
}
if rollout.Status.RollingState != v1alpha1.RolloutSucceedState {
return fmt.Errorf("error rollout status state %s", rollout.Status.RollingState)
}
compRevName = rollout.Spec.TargetRevisionName
if rollout.GetAnnotations() == nil || rollout.GetAnnotations()[oam.AnnotationWorkloadName] != componentRevision {
return fmt.Errorf("target workload name annotation missmatch want %s acctually %s",
rollout.GetAnnotations()[oam.AnnotationWorkloadName], componentRevision)
}
deployKey := types.NamespacedName{Namespace: namespaceName, Name: compRevName}
if err := k8sClient.Get(ctx, deployKey, &targerDeploy); err != nil {
return err
}
gvkStr := rollout.GetAnnotations()[oam.AnnotationWorkloadGVK]
gvk := map[string]string{}
if err := json.Unmarshal([]byte(gvkStr), &gvk); err != nil {
return err
}
if gvk["apiVersion"] != "apps/v1" || gvk["kind"] != "Deployment" {
return fmt.Errorf("error targetWorkload gvk")
}
if *targerDeploy.Spec.Replicas != *rollout.Spec.RolloutPlan.TargetSize {
return fmt.Errorf("targetDeploy replicas missMatch %d, %d", targerDeploy.Spec.Replicas, rollout.Spec.RolloutPlan.TargetSize)
}
if targerDeploy.Status.UpdatedReplicas != *targerDeploy.Spec.Replicas {
return fmt.Errorf("update not finish")
}
if rollout.Status.LastSourceRevision == "" {
return nil
}
deployKey = types.NamespacedName{Namespace: namespaceName, Name: rollout.Status.LastSourceRevision}
if err := k8sClient.Get(ctx, deployKey, &sourceDeploy); err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("source deploy still exist namespace %s deployName %s", namespaceName, rollout.Status.LastSourceRevision)
}
return nil
}, time.Second*60, 300*time.Millisecond).Should(BeNil())
}
BeforeEach(func() {
By("Start to run a test, init whole env")
namespaceName = randomNamespaceName("rollout-trait-e2e-test")
app = v1beta1.Application{}
createNamespace()
componentName = "express-server"
})
AfterEach(func() {
By("Clean up resources after a test")
Eventually(func() error {
err := k8sClient.Delete(ctx, &app)
if err == nil || apierrors.IsNotFound(err) {
return nil
}
return err
}, 15*time.Second, 300*time.Microsecond).Should(BeNil())
By(fmt.Sprintf("Delete the entire namespaceName %s", ns.Name))
// delete the namespaceName with all its resources
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationBackground))).Should(BeNil())
})
It("rollout as a trait whole process e2e-test", func() {
By("first scale operation")
Expect(common.ReadYamlToObject("testdata/rollout/deployment/application.yaml", &app)).Should(BeNil())
app.Namespace = namespaceName
Expect(k8sClient.Create(ctx, &app)).Should(BeNil())
verifySuccess("express-server-v1")
appKey := types.NamespacedName{Namespace: namespaceName, Name: app.Name}
checkApp := &v1beta1.Application{}
By("update application upgrade to v2")
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image":"stefanprodan/podinfo:4.0.3","cpu":"0.1"}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
verifySuccess("express-server-v2")
By("update application upgrade to v3")
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image":"stefanprodan/podinfo:4.0.3","cpu":"0.2"}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
verifySuccess("express-server-v3")
By("roll back to v2")
time.Sleep(30 * time.Second)
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Traits[0].Properties.Raw = []byte(`{"targetRevision":"express-server-v2","rolloutBatches":[{"replicas":1},{"replicas":1}],"targetSize":2}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
verifySuccess("express-server-v2")
By("modify targetSize to scale")
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Traits[0].Properties.Raw = []byte(`{"targetRevision":"express-server-v2","targetSize":4,"rolloutBatches":[{"replicas":1},{"replicas":1}]}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
time.Sleep(12 * time.Second)
verifySuccess("express-server-v2")
By("update application upgrade to v4")
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image":"stefanprodan/podinfo:4.0.3","cpu":"0.3"}`)
checkApp.Spec.Components[0].Traits[0].Properties.Raw =
[]byte(`{"rolloutBatches":[{"replicas":2},{"replicas":2}],"targetSize":4}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
verifySuccess("express-server-v4")
By("update application batch upgrade to v5")
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image":"stefanprodan/podinfo:4.0.3","cpu":"0.31"}`)
checkApp.Spec.Components[0].Traits[0].Properties.Raw =
[]byte(`{"rolloutBatches":[{"replicas":2},{"replicas":2}],"targetSize":4,"batchPartition":0}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
// check rollout paused in partition 0
time.Sleep(5 * time.Second)
sourceDeploy := v1.Deployment{}
Eventually(func() error {
rolloutKey := types.NamespacedName{Namespace: namespaceName, Name: componentName}
if err := k8sClient.Get(ctx, rolloutKey, &rollout); err != nil {
return err
}
if rollout.Spec.TargetRevisionName != "express-server-v5" {
return fmt.Errorf("rollout have not point to right targetRevision")
}
if rollout.Status.RollingState != v1alpha1.RollingInBatchesState {
return fmt.Errorf("error rollout status state %s", rollout.Status.RollingState)
}
if rollout.Status.CurrentBatch != 0 {
return fmt.Errorf("current batchPartition missmatch accutally %d", rollout.Status.CurrentBatch)
}
deployKey := types.NamespacedName{Namespace: namespaceName, Name: compRevName}
if err := k8sClient.Get(ctx, deployKey, &sourceDeploy); err != nil {
return err
}
if *sourceDeploy.Spec.Replicas != 2 {
return fmt.Errorf("targetDeploy replicas missMatch %d", *sourceDeploy.Spec.Replicas)
}
return nil
}, 300*time.Second, 300*time.Millisecond).Should(BeNil())
By("continue rollout upgrade legacy batches")
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Traits[0].Properties.Raw =
[]byte(`{"rolloutBatches":[{"replicas":2},{"replicas":2}],"targetSize":4,"batchPartition":1}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
verifySuccess("express-server-v5")
By("delete the application, check workload have been removed")
Expect(k8sClient.Delete(ctx, checkApp)).Should(BeNil())
listOptions := []client.ListOption{
client.InNamespace(namespaceName),
}
deployList := &v1.DeploymentList{}
Eventually(func() error {
if err := k8sClient.List(ctx, deployList, listOptions...); err != nil {
return err
}
if len(deployList.Items) != 0 {
return fmt.Errorf("workload have not been removed")
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
})
It("rollout scale up and down without rollout batches", func() {
By("first scale operation")
Expect(common.ReadYamlToObject("testdata/rollout/deployment/application.yaml", &app)).Should(BeNil())
app.Namespace = namespaceName
Expect(k8sClient.Create(ctx, &app)).Should(BeNil())
verifySuccess("express-server-v1")
By("scale again to targetSize 4")
appKey := types.NamespacedName{Namespace: namespaceName, Name: app.Name}
checkApp := &v1beta1.Application{}
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
// scale up without rollout batches; verify that the rollout controller fills in default batches
checkApp.Spec.Components[0].Traits[0].Properties.Raw =
[]byte(`{"targetSize":4}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
Eventually(func() error {
checkRollout := v1alpha1.Rollout{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: componentName}, &checkRollout); err != nil {
return err
}
if *checkRollout.Spec.RolloutPlan.TargetSize != 4 {
return fmt.Errorf("rollout targetSize haven't update")
}
if len(checkRollout.Spec.RolloutPlan.RolloutBatches) != 1 {
return fmt.Errorf("fail to fill rollout batches")
}
if checkRollout.Spec.RolloutPlan.RolloutBatches[0].Replicas != intstr.FromInt(2) {
return fmt.Errorf("fill rollout batches missmatch")
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
verifySuccess("express-server-v1")
checkApp = &v1beta1.Application{}
By("update application upgrade to v2")
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image":"stefanprodan/podinfo:4.0.3","cpu":"0.1"}`)
checkApp.Spec.Components[0].Traits[0].Properties.Raw =
[]byte(`{"rolloutBatches":[{"replicas":2},{"replicas":2}],"targetSize":4}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
verifySuccess("express-server-v2")
By("scale down to targetSize 2")
appKey = types.NamespacedName{Namespace: namespaceName, Name: app.Name}
checkApp = &v1beta1.Application{}
Eventually(func() error {
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
}
// scale down without rollout batches; verify that the rollout controller fills in default batches
checkApp.Spec.Components[0].Traits[0].Properties.Raw =
[]byte(`{"targetSize":2}`)
if err = k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
Eventually(func() error {
checkRollout := v1alpha1.Rollout{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: componentName}, &checkRollout); err != nil {
return err
}
if *checkRollout.Spec.RolloutPlan.TargetSize != 2 {
return fmt.Errorf("rollout targetSize haven't update")
}
if len(checkRollout.Spec.RolloutPlan.RolloutBatches) != 1 {
return fmt.Errorf("fail to fill rollout batches")
}
if checkRollout.Spec.RolloutPlan.RolloutBatches[0].Replicas != intstr.FromInt(2) {
return fmt.Errorf("fill rollout batches missmatch")
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
verifySuccess("express-server-v2")
})
It("Delete a component with rollout trait from an application should delete this workload", func() {
By("first scale operation")
Expect(common.ReadYamlToObject("testdata/rollout/deployment/multi_comp_app.yaml", &app)).Should(BeNil())
app.Namespace = namespaceName
Expect(k8sClient.Create(ctx, &app)).Should(BeNil())
verifySuccess("express-server-v1")
componentName = "express-server-another"
verifySuccess("express-server-another-v1")
By("delete a component")
Eventually(func() error {
checkApp := &v1beta1.Application{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: app.Name}, checkApp); err != nil {
return err
}
checkApp.Spec.Components = []common2.ApplicationComponent{checkApp.Spec.Components[0]}
if err := k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
By("check deployment have been gc")
Eventually(func() error {
checkApp := &v1beta1.Application{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: app.Name}, checkApp); err != nil {
return err
}
if len(checkApp.Spec.Components) != 1 || checkApp.Spec.Components[0].Name != "express-server" {
return fmt.Errorf("app hasn't update yet")
}
deploy := v1.Deployment{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: "express-server-another-v1"}, &deploy); err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("another deployment haven't been delete")
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
})
})
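The raw JSON these tests write into the component's trait properties is the legacy rollout trait configuration of the application under test. As a point of reference only, a declarative sketch of such a manifest is shown below; it assumes the trait is registered as `rollout` (as in the test fixtures under testdata/rollout/deployment/), and the application name and component type are illustrative rather than copied from a shipped example.
# Illustrative only: mirrors the raw trait JSON used in the tests above.
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: rollout-e2e-sample      # hypothetical name, not part of the test data
spec:
  components:
    - name: express-server
      type: webservice          # assumed component type; the tests use their own testdata definition
      properties:
        image: stefanprodan/podinfo:4.0.3
        cpu: "0.3"
      traits:
        - type: rollout
          properties:
            targetSize: 4
            batchPartition: 1   # upgrade through batch index 1, i.e. both batches here
            rolloutBatches:
              - replicas: 2
              - replicas: 2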

View File

@ -24,8 +24,6 @@ import (
"testing"
"time"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@ -84,8 +82,6 @@ var _ = BeforeSuite(func() {
depSchemeBuilder.Register(depExample.DeepCopyObject())
err = depSchemeBuilder.AddToScheme(scheme)
Expect(err).Should(BeNil())
err = v1alpha1.AddToScheme(scheme)
Expect(err).Should(BeNil())
By("Setting up kubernetes client")
k8sClient, err = client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme})
if err != nil {

View File

@ -1,15 +0,0 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
annotations:
"app.oam.dev/rollout-template": "false"
name: app-e2e
spec:
components:
- name: myweb
type: worker
properties:
image: "stefanprodan/podinfo:4.0.3"
cmd:
- ./podinfo
- stress-cpu=1

View File

@ -1,17 +0,0 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-rolling
spec:
components:
- name: metrics-provider
type: worker
properties:
cmd:
- ./podinfo
- stress-cpu=3.0
image: stefanprodan/podinfo:4.0.6
port: 8080
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
targetSize: 3
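The removed manifest above leaves batching to the legacy rollout controller's defaults. For comparison, a rolloutPlan could also declare its batches explicitly; the sketch below is a hypothetical variant of the same application, assuming rolloutPlan sits directly under spec as in the removed file, with a rolloutBatches list like the one the e2e tests set through trait properties.
# Hypothetical variant with explicit batches; not part of the removed test data.
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: test-rolling-batched    # hypothetical name
spec:
  components:
    - name: metrics-provider
      type: worker
      properties:
        cmd:
          - ./podinfo
          - stress-cpu=3.0
        image: stefanprodan/podinfo:4.0.6
        port: 8080
  rolloutPlan:
    rolloutStrategy: "IncreaseFirst"
    targetSize: 3
    rolloutBatches:             # explicit batches instead of the controller-filled default
      - replicas: 1
      - replicas: 2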

View File

@ -1,58 +0,0 @@
rollout: {
type: "trait"
annotations: {}
labels: {}
description: "Configures Canary deployment strategy for your application."
attributes: {
appliesToWorkloads: ["deployments.apps"]
podDisruptive: true
definitionRef: name: "canaries.flagger.app"
workloadRefPath: "spec.targetRef"
revisionEnabled: true
extension: install: helm: {
repo: "oam-flagger"
name: "flagger"
namespace: "vela-system"
url: "https://oam.dev/flagger/archives/"
version: "1.1.0"
}
}
}
template: {
outputs: rollout: {{
apiVersion: "flagger.app/v1beta1"
kind: "Canary"
spec: {
provider: "smi"
progressDeadlineSeconds: 60
service: {
// Traffic routing is not supported yet, but this is a required field in the Flagger Canary CRD
port: 80
// Traffic routing is not supported yet, but this is a required field in the Flagger Canary CRD
targetPort: 8080
}
analysis: {
interval: parameter.interval
// max number of failed metric checks before rollback
threshold: 10
// max traffic percentage routed to canary
// percentage (0-100)
maxWeight: 50
// canary increment step
// percentage (0-100)
stepWeight: parameter.stepWeight
// max replicas scale up to canary
maxReplicas: parameter.replicas
}
}
}}
parameter: {
// +usage=Total replicas of the workload
replicas: *2 | int
// +alias=step-weight
// +usage=Weight percent of every step in rolling update
stepWeight: *50 | int
// +usage=Schedule interval time
interval: *"30s" | string
}
}
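The registry trait removed above renders a Flagger Canary and exposes three parameters: replicas, stepWeight (aliased step-weight), and interval. A minimal usage sketch follows; it assumes the trait is installed from the registry under the name rollout, that Flagger with an SMI provider is available in the cluster, and that the application and component names are illustrative.
# Illustrative usage of the removed Flagger-based rollout trait; cluster setup is assumed.
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: canary-demo             # hypothetical name
spec:
  components:
    - name: frontend
      type: webservice          # any Deployment-backed component type
      properties:
        image: stefanprodan/podinfo:4.0.6
        port: 8080
      traits:
        - type: rollout
          properties:
            replicas: 3         # total replicas of the workload
            stepWeight: 20      # canary traffic increment per step (percent); aliased as step-weight
            interval: "1m"      # canary analysis interval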

View File

@ -1,65 +0,0 @@
# Code generated by KubeVela templates. DO NOT EDIT. Please edit the original cue file.
# Definition source cue file: vela-templates/definitions/registry/rollout.cue
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: Configures Canary deployment strategy for your application.
name: rollout
namespace: vela-system
spec:
appliesToWorkloads:
- deployments.apps
definitionRef:
name: canaries.flagger.app
extension:
install:
helm:
name: flagger
namespace: vela-system
repo: oam-flagger
url: https://oam.dev/flagger/archives/
version: 1.1.0
podDisruptive: true
revisionEnabled: true
schematic:
cue:
template: |
outputs: rollout: {{
apiVersion: "flagger.app/v1beta1"
kind: "Canary"
spec: {
provider: "smi"
progressDeadlineSeconds: 60
service: {
// Traffic routing is not supported yet, but this is a required field in the Flagger Canary CRD
port: 80
// Traffic routing is not supported yet, but this is a required field in the Flagger Canary CRD
targetPort: 8080
}
analysis: {
interval: parameter.interval
// max number of failed metric checks before rollback
threshold: 10
// max traffic percentage routed to canary
// percentage (0-100)
maxWeight: 50
// canary increment step
// percentage (0-100)
stepWeight: parameter.stepWeight
// max replicas scale up to canary
maxReplicas: parameter.replicas
}
}
}}
parameter: {
// +usage=Total replicas of the workload
replicas: *2 | int
// +alias=step-weight
// +usage=Weight percent of every step in rolling update
stepWeight: *50 | int
// +usage=Schedule interval time
interval: *"30s" | string
}
workloadRefPath: spec.targetRef