mirror of https://github.com/kubevela/kubevela.git
Compare commits
10 Commits
26b84e4109 ... afe8ef0acf
| Author | SHA1 | Date |
|---|---|---|
| | afe8ef0acf | |
| | 3e48313545 | |
| | 33fba39cc8 | |
| | 7e0c528375 | |
| | 5f69a7038c | |
| | 461fe363ca | |
| | 33498288ee | |
| | d627ecea2a | |
| | d8a17740dc | |
| | 05b0ec89a5 | |
@@ -15,9 +15,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-        )
+        import "vela/op"

         output: op.#ApplyApplicationInParallel & {}

@@ -16,9 +16,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-        )
+        import "vela/op"

         // apply application
         output: op.#ApplyApplication & {}

@@ -13,12 +13,8 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "strconv"
-          "strings"
-          "vela/kube"
-          "vela/builtin"
-        )
+        import "vela/kube"
+        import "vela/builtin"

         output: kube.#Apply & {
           $params: {

@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/kube"
-        )
+        import "vela/kube"

         apply: kube.#Apply & {
           $params: parameter

@@ -16,9 +16,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-        )
+        import "vela/op"

         // apply remaining components and traits
         apply: op.#ApplyRemaining & {

@@ -13,10 +13,8 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/kube"
-          "vela/builtin"
-        )
+        import "vela/kube"
+        import "vela/builtin"

         apply: kube.#Apply & {
           $params: value: {
@@ -13,12 +13,9 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/config"
-          "vela/kube"
-          "vela/builtin"
-          "strings"
-        )
+        import "vela/config"
+        import "vela/kube"
+        import "vela/builtin"

         cfg: config.#CreateConfig & {
           $params: {

@@ -87,9 +84,9 @@ spec:
           }
         }
         providerBasic: {
-          accessKey: string
-          secretKey: string
-          region: string
+          accessKey!: string
+          secretKey!: string
+          region!: string
         }
         #AlibabaProvider: {
           providerBasic

@@ -141,5 +138,5 @@ spec:
           type: "ucloud"
           name: *"ucloud-provider" | string
         }
-        parameter: *#AlibabaProvider | #AWSProvider | #AzureProvider | #BaiduProvider | #ECProvider | #GCPProvider | #TencentProvider | #UCloudProvider
+        parameter: #AlibabaProvider | #AWSProvider | #AzureProvider | #BaiduProvider | #ECProvider | #GCPProvider | #TencentProvider | #UCloudProvider

@@ -13,13 +13,10 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/builtin"
-          "vela/kube"
-          "vela/util"
-          "encoding/json"
-          "strings"
-        )
+        import "vela/builtin"
+        import "vela/kube"
+        import "vela/util"
+        import "strings"

         url: {
           if parameter.context.git != _|_ {

@@ -14,10 +14,8 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/metrics"
-          "vela/builtin"
-        )
+        import "vela/metrics"
+        import "vela/builtin"

         check: metrics.#PromCheck & {
           $params: {

@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/kube"
-        )
+        import "vela/kube"

         parameter: {
           labelselector?: {...}

@@ -12,11 +12,9 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/builtin"
-          "vela/query"
-          "strconv"
-        )
+        import "vela/builtin"
+        import "vela/query"
+        import "strconv"

         collect: query.#CollectServiceEndpoints & {
           $params: app: {
@@ -16,6 +16,8 @@ spec:
   schematic:
     cue:
       template: |
+        import "list"
+
         #PatchParams: {
           // +usage=Specify the name of the target container, if not set, use the component name
           containerName: *"" | string

@@ -73,7 +75,7 @@ spec:
           }

           // +patchStrategy=replace
-          args: [for a in _args if _delArgs[a] == _|_ {a}] + [for a in _addArgs if _delArgs[a] == _|_ && _argsMap[a] == _|_ {a}]
+          args: list.Concat([[for a in _args if _delArgs[a] == _|_ {a}], [for a in _addArgs if _delArgs[a] == _|_ && _argsMap[a] == _|_ {a}]])
         }
        }
        // +patchStrategy=open

@@ -17,10 +17,9 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "strconv"
-          "strings"
-        )
+        import "strconv"
+        import "strings"
+        import "list"

         #PatchParams: {
           // +usage=Specify the name of the target container, if not set, use the component name

@@ -67,7 +66,7 @@ spec:
         _basePortsMap: {for _basePort in _basePorts {(strings.ToLower(_basePort.protocol) + strconv.FormatInt(_basePort.containerPort, 10)): _basePort}}
         _portsMap: {for port in _params.ports {(strings.ToLower(port.protocol) + strconv.FormatInt(port.containerPort, 10)): port}}
         // +patchStrategy=replace
-        ports: [for portVar in _basePorts {
+        ports: list.Concat([[for portVar in _basePorts {
           containerPort: portVar.containerPort
           protocol: portVar.protocol
           name: portVar.name

@@ -80,7 +79,7 @@ spec:
             hostIP: _portsMap[_uniqueKey].hostIP
           }
          }
-        }] + [for port in _params.ports if _basePortsMap[strings.ToLower(port.protocol)+strconv.FormatInt(port.containerPort, 10)] == _|_ {
+        }], [for port in _params.ports if _basePortsMap[strings.ToLower(port.protocol)+strconv.FormatInt(port.containerPort, 10)] == _|_ {
          if port.containerPort != _|_ {
            containerPort: port.containerPort
          }

@@ -93,7 +92,7 @@ spec:
          if port.hostIP != _|_ {
            hostIP: port.hostIP
          }
-        }]
+        }]])
        }
       }
      }
@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/config"
-        )
+        import "vela/config"

         deploy: config.#CreateConfig & {
           $params: parameter

@@ -11,6 +11,8 @@ spec:
   schematic:
     cue:
       template: |
+        import "list"
+
         mountsArray: {
           pvc: *[
             for v in parameter.volumeMounts.pvc {

@@ -130,7 +132,7 @@ spec:
             },
           ] | []
         }
-        volumesList: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
+        volumesList: list.Concat([volumesArray.pvc, volumesArray.configMap, volumesArray.secret, volumesArray.emptyDir, volumesArray.hostPath])
         deDupVolumesArray: [
           for val in [
             for i, vi in volumesList {

@@ -11,9 +11,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "strconv"
-        )
+        import "strconv"

         mountsArray: [
           if parameter.volumeMounts != _|_ && parameter.volumeMounts.pvc != _|_ for v in parameter.volumeMounts.pvc {

@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/config"
-        )
+        import "vela/config"

         deploy: config.#DeleteConfig & {
           $params: parameter

@@ -12,11 +12,9 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/kube"
-          "vela/builtin"
-          "encoding/yaml"
-        )
+        import "vela/kube"
+        import "vela/builtin"
+        import "encoding/yaml"

         dependsOn: kube.#Read & {
           $params: value: {

@@ -14,9 +14,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-        )
+        import "vela/op"

         app: op.#DeployCloudResource & {
           env: parameter.env
@@ -14,10 +14,9 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/multicluster"
-          "vela/builtin"
-        )
+        import "vela/multicluster"
+        import "vela/builtin"


         if parameter.auto == false {
           suspend: builtin.#Suspend & {$params: message: "Waiting approval to the deploy step \"\(context.stepName)\""}

@@ -15,9 +15,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-        )
+        import "vela/op"

         app: op.#ApplyEnvBindApp & {
           env: parameter.env

@@ -15,9 +15,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-        )
+        import "vela/op"

         app: op.#Steps & {
           load: op.#Load

@@ -16,6 +16,8 @@ spec:
   schematic:
     cue:
       template: |
+        import "list"
+
         #PatchParams: {
           // +usage=Specify the name of the target container, if not set, use the component name
           containerName: *"" | string

@@ -49,7 +51,7 @@ spec:
         if _baseEnv != _|_ {
           _baseEnvMap: {for envVar in _baseEnv {(envVar.name): envVar}}
           // +patchStrategy=replace
-          env: [for envVar in _baseEnv if _delKeys[envVar.name] == _|_ && !_params.replace {
+          env: list.Concat([[for envVar in _baseEnv if _delKeys[envVar.name] == _|_ && !_params.replace {
            name: envVar.name
            if _params.env[envVar.name] != _|_ {
              value: _params.env[envVar.name]

@@ -62,10 +64,10 @@ spec:
              valueFrom: envVar.valueFrom
            }
           }
-          }] + [for k, v in _params.env if _delKeys[k] == _|_ && (_params.replace || _baseEnvMap[k] == _|_) {
+          }], [for k, v in _params.env if _delKeys[k] == _|_ && (_params.replace || _baseEnvMap[k] == _|_) {
            name: k
            value: v
-          }]
+          }]])
         }
        }
       }
@@ -14,10 +14,8 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-          "vela/kube"
-        )
+        import "vela/op"
+        import "vela/kube"

         object: {
           apiVersion: "v1"

@@ -14,10 +14,8 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-          "vela/kube"
-        )
+        import "vela/op"
+        import "vela/kube"

         meta: {
           name: *context.name | string

@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/kube"
-        )
+        import "vela/kube"

         apply: kube.#Apply & {
           $params: {

@@ -12,11 +12,9 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/kube"
-          "encoding/base64"
-          "encoding/json"
-        )
+        import "vela/kube"
+        import "encoding/base64"
+        import "encoding/json"

         secret: {
           data: *parameter.data | {}

@@ -15,10 +15,8 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "strconv"
-          "strings"
-        )
+        import "strconv"
+        import "strings"

         outputs: service: {
           apiVersion: "v1"

@@ -17,6 +17,7 @@ spec:
       template: |
+        import "strconv"


         let nameSuffix = {
          if parameter.name != _|_ {"-" + parameter.name}
          if parameter.name == _|_ {""}
@@ -12,11 +12,9 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/kube"
-          "vela/util"
-          "encoding/base64"
-        )
+        import "vela/kube"
+        import "vela/util"
+        import "encoding/base64"

         output: kube.#Read & {
           $params: value: {

@@ -17,6 +17,8 @@ spec:
   schematic:
     cue:
       template: |
+        import "list"
+
         patch: spec: template: spec: {
           // +patchKey=name
           containers: [{

@@ -43,10 +45,10 @@ spec:
           }

           // +patchKey=name
-          volumeMounts: [{
+          volumeMounts: list.Concat([[{
             name: parameter.mountName
             mountPath: parameter.initMountPath
-          }] + parameter.extraVolumeMounts
+          }], parameter.extraVolumeMounts])
         }]
         // +patchKey=name
         volumes: [{

@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/config"
-        )
+        import "vela/config"

         output: config.#ListConfig & {
           $params: parameter

@@ -19,9 +19,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "encoding/json"
-        )
+        import "encoding/json"

         outputs: nocalhostService: {
           apiVersion: "v1"

@@ -12,14 +12,12 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/http"
-          "vela/email"
-          "vela/kube"
-          "vela/util"
-          "encoding/base64"
-          "encoding/json"
-        )
+        import "vela/http"
+        import "vela/email"
+        import "vela/kube"
+        import "vela/util"
+        import "encoding/base64"
+        import "encoding/json"

         parameter: {
           // +usage=Please fulfill its url and message if you want to send Lark messages
@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/builtin"
-        )
+        import "vela/builtin"

         parameter: message: string

@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/config"
-        )
+        import "vela/config"

         output: config.#ReadConfig & {
           $params: parameter

@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/kube"
-        )
+        import "vela/kube"

         output: kube.#Read & {
           $params: {

@@ -13,11 +13,9 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-          "vela/http"
-          "encoding/json"
-        )
+        import "vela/op"
+        import "vela/http"
+        import "encoding/json"

         req: http.#HTTPDo & {
           $params: {

@@ -14,9 +14,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/op"
-        )
+        import "vela/op"

         app: op.#ShareCloudResource & {
           env: parameter.env

@@ -11,10 +11,8 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "strconv"
-          "strings"
-        )
+        import "strconv"
+        import "strings"

         mountsArray: [
           if parameter.volumeMounts != _|_ if parameter.volumeMounts.pvc != _|_ for v in parameter.volumeMounts.pvc {
@@ -12,9 +12,7 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/builtin"
-        )
+        import "vela/builtin"

         suspend: builtin.#Suspend & {
           $params: parameter

@@ -12,11 +12,9 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/kube"
-          "vela/builtin"
-          "vela/util"
-        )
+        import "vela/kube"
+        import "vela/builtin"
+        import "vela/util"

         mountsArray: [
           if parameter.storage != _|_ && parameter.storage.secret != _|_ for v in parameter.storage.secret {

@@ -12,13 +12,11 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "vela/http"
-          "vela/kube"
-          "vela/util"
-          "encoding/json"
-          "encoding/base64"
-        )
+        import "vela/http"
+        import "vela/kube"
+        import "vela/util"
+        import "encoding/json"
+        import "encoding/base64"

         data: {
           if parameter.data == _|_ {

@@ -11,10 +11,8 @@ spec:
   schematic:
     cue:
       template: |
-        import (
-          "strconv"
-          "strings"
-        )
+        import "strconv"
+        import "strings"

         mountsArray: [
           if parameter.volumeMounts != _|_ && parameter.volumeMounts.pvc != _|_ for v in parameter.volumeMounts.pvc {
@@ -0,0 +1,38 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/spf13/pflag"

    standardcontroller "github.com/oam-dev/kubevela/pkg/controller"
)

// AdmissionConfig contains admission control configuration.
type AdmissionConfig struct {
    // Fields will be populated based on what standardcontroller.AddAdmissionFlags sets
}

// NewAdmissionConfig creates a new AdmissionConfig with defaults.
func NewAdmissionConfig() *AdmissionConfig {
    return &AdmissionConfig{}
}

// AddFlags registers admission configuration flags.
func (c *AdmissionConfig) AddFlags(fs *pflag.FlagSet) {
    standardcontroller.AddAdmissionFlags(fs)
}
@@ -0,0 +1,56 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "time"

    "github.com/spf13/pflag"

    commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
)

// ApplicationConfig contains application-specific configuration.
type ApplicationConfig struct {
    ReSyncPeriod time.Duration
}

// NewApplicationConfig creates a new ApplicationConfig with defaults.
func NewApplicationConfig() *ApplicationConfig {
    return &ApplicationConfig{
        ReSyncPeriod: commonconfig.ApplicationReSyncPeriod,
    }
}

// AddFlags registers application configuration flags.
func (c *ApplicationConfig) AddFlags(fs *pflag.FlagSet) {
    fs.DurationVar(&c.ReSyncPeriod,
        "application-re-sync-period",
        c.ReSyncPeriod,
        "Re-sync period for application to re-sync, also known as the state-keep interval.")
}

// SyncToApplicationGlobals syncs the parsed configuration values to application package global variables.
// This should be called after flag parsing to ensure the application controller uses the configured values.
//
// NOTE: This method exists for backward compatibility with legacy code that depends on global
// variables in the commonconfig package. Ideally, configuration should be injected rather than using globals.
//
// The flow is: CLI flags -> ApplicationConfig struct fields -> commonconfig globals (via this method)
func (c *ApplicationConfig) SyncToApplicationGlobals() {
    commonconfig.ApplicationReSyncPeriod = c.ReSyncPeriod
}
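The config modules added in this series share one lifecycle: construct with defaults, register flags on a pflag.FlagSet, parse, then sync the parsed values back into the legacy globals. The sketch below illustrates that flow for the application re-sync flag; the standalone main, the flag-set name, and the 5-minute default are illustrative assumptions, not code from this diff.

package main

import (
    "fmt"
    "os"
    "time"

    "github.com/spf13/pflag"
)

// ApplicationConfig mirrors the struct added above, redeclared so the sketch is self-contained.
type ApplicationConfig struct {
    ReSyncPeriod time.Duration
}

// AddFlags binds the re-sync period to a flag, mirroring the diff above.
func (c *ApplicationConfig) AddFlags(fs *pflag.FlagSet) {
    fs.DurationVar(&c.ReSyncPeriod, "application-re-sync-period", c.ReSyncPeriod,
        "Re-sync period for application to re-sync, also known as the state-keep interval.")
}

func main() {
    cfg := &ApplicationConfig{ReSyncPeriod: 5 * time.Minute} // illustrative default
    fs := pflag.NewFlagSet("vela-core", pflag.ExitOnError)
    cfg.AddFlags(fs)
    _ = fs.Parse(os.Args[1:]) // e.g. --application-re-sync-period=10m
    fmt.Println("re-sync period:", cfg.ReSyncPeriod)
    // In the controller, SyncToApplicationGlobals() would now copy the value
    // into commonconfig.ApplicationReSyncPeriod for legacy callers.
}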
@@ -0,0 +1,40 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    pkgclient "github.com/kubevela/pkg/controller/client"
    "github.com/spf13/pflag"
)

// ClientConfig contains controller client configuration.
// This wraps the external package's client configuration flags.
type ClientConfig struct {
    // Note: The actual configuration is managed by the pkgclient package
    // This is a wrapper to maintain consistency with our config pattern
}

// NewClientConfig creates a new ClientConfig with defaults.
func NewClientConfig() *ClientConfig {
    return &ClientConfig{}
}

// AddFlags registers client configuration flags.
// Delegates to the external package's flag registration.
func (c *ClientConfig) AddFlags(fs *pflag.FlagSet) {
    pkgclient.AddTimeoutControllerClientFlags(fs)
}
@@ -0,0 +1,67 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/spf13/pflag"

    oamcontroller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
)

// ControllerConfig wraps the oamcontroller.Args configuration.
// While this appears to duplicate the Args struct, it serves as the new home for
// controller flag registration after the AddFlags method was moved here from
// the oamcontroller package during refactoring.
type ControllerConfig struct {
    // Embed the existing Args struct to reuse its fields
    oamcontroller.Args
}

// NewControllerConfig creates a new ControllerConfig with defaults.
func NewControllerConfig() *ControllerConfig {
    return &ControllerConfig{
        Args: oamcontroller.Args{
            RevisionLimit:                                50,
            AppRevisionLimit:                             10,
            DefRevisionLimit:                             20,
            AutoGenWorkloadDefinition:                    true,
            ConcurrentReconciles:                         4,
            IgnoreAppWithoutControllerRequirement:        false,
            IgnoreDefinitionWithoutControllerRequirement: false,
        },
    }
}

// AddFlags registers controller configuration flags.
// This method was moved here from oamcontroller.Args during refactoring
// to centralize configuration management.
func (c *ControllerConfig) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&c.RevisionLimit, "revision-limit", c.RevisionLimit,
        "RevisionLimit is the maximum number of revisions that will be maintained. The default value is 50.")
    fs.IntVar(&c.AppRevisionLimit, "application-revision-limit", c.AppRevisionLimit,
        "application-revision-limit is the maximum number of application useless revisions that will be maintained, if the useless revisions exceed this number, older ones will be GCed first.The default value is 10.")
    fs.IntVar(&c.DefRevisionLimit, "definition-revision-limit", c.DefRevisionLimit,
        "definition-revision-limit is the maximum number of component/trait definition useless revisions that will be maintained, if the useless revisions exceed this number, older ones will be GCed first.The default value is 20.")
    fs.BoolVar(&c.AutoGenWorkloadDefinition, "autogen-workload-definition", c.AutoGenWorkloadDefinition,
        "Automatic generated workloadDefinition which componentDefinition refers to.")
    fs.IntVar(&c.ConcurrentReconciles, "concurrent-reconciles", c.ConcurrentReconciles,
        "concurrent-reconciles is the concurrent reconcile number of the controller. The default value is 4")
    fs.BoolVar(&c.IgnoreAppWithoutControllerRequirement, "ignore-app-without-controller-version", c.IgnoreAppWithoutControllerRequirement,
        "If true, application controller will not process the app without 'app.oam.dev/controller-version-require' annotation")
    fs.BoolVar(&c.IgnoreDefinitionWithoutControllerRequirement, "ignore-definition-without-controller-version", c.IgnoreDefinitionWithoutControllerRequirement,
        "If true, trait/component/workflowstep definition controller will not process the definition without 'definition.oam.dev/controller-version-require' annotation")
}
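Because ControllerConfig embeds oamcontroller.Args, the Args fields are promoted onto the wrapper and can be read or bound directly, while the embedded struct can still be handed to code that expects the original type. A small self-contained sketch of that embedding behaviour; the Args type here is a stand-in for the real oamcontroller.Args, not the actual definition.

package main

import "fmt"

// Args stands in for oamcontroller.Args; only two fields are shown.
type Args struct {
    RevisionLimit        int
    ConcurrentReconciles int
}

// ControllerConfig embeds Args, so Args' fields are promoted onto it.
type ControllerConfig struct {
    Args
}

func main() {
    c := &ControllerConfig{Args: Args{RevisionLimit: 50, ConcurrentReconciles: 4}}
    // Promoted access: no need to spell out c.Args.RevisionLimit.
    fmt.Println(c.RevisionLimit, c.ConcurrentReconciles)
    // The embedded value is still available as a whole for legacy call sites.
    fmt.Printf("%+v\n", c.Args)
}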
@@ -0,0 +1,61 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/kubevela/pkg/cue/cuex"
    "github.com/spf13/pflag"
)

// CUEConfig contains CUE language configuration.
type CUEConfig struct {
    EnableExternalPackage      bool
    EnableExternalPackageWatch bool
}

// NewCUEConfig creates a new CUEConfig with defaults.
func NewCUEConfig() *CUEConfig {
    return &CUEConfig{
        EnableExternalPackage:      cuex.EnableExternalPackageForDefaultCompiler,
        EnableExternalPackageWatch: cuex.EnableExternalPackageWatchForDefaultCompiler,
    }
}

// AddFlags registers CUE configuration flags.
func (c *CUEConfig) AddFlags(fs *pflag.FlagSet) {
    fs.BoolVar(&c.EnableExternalPackage,
        "enable-external-package-for-default-compiler",
        c.EnableExternalPackage,
        "Enable loading third-party CUE packages into the default CUE compiler. When enabled, external CUE packages can be imported and used in CUE templates.")
    fs.BoolVar(&c.EnableExternalPackageWatch,
        "enable-external-package-watch-for-default-compiler",
        c.EnableExternalPackageWatch,
        "Enable watching for changes in external CUE packages and automatically reload them when modified. Requires enable-external-package-for-default-compiler to be enabled.")
}

// SyncToCUEGlobals syncs the parsed configuration values to CUE package global variables.
// This should be called after flag parsing to ensure the CUE compiler uses the configured values.
//
// NOTE: This method exists for backward compatibility with legacy code that depends on global
// variables in the cuex package. Ideally, the CUE compiler configuration should be injected
// rather than relying on globals.
//
// The flow is: CLI flags -> CUEConfig struct fields -> cuex globals (via this method)
func (c *CUEConfig) SyncToCUEGlobals() {
    cuex.EnableExternalPackageForDefaultCompiler = c.EnableExternalPackage
    cuex.EnableExternalPackageWatchForDefaultCompiler = c.EnableExternalPackageWatch
}
@@ -0,0 +1,40 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/spf13/pflag"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
)

// FeatureConfig contains feature gate configuration.
// This wraps the Kubernetes feature gate system.
type FeatureConfig struct {
    // Note: The actual configuration is managed by the utilfeature package
    // This is a wrapper to maintain consistency with our config pattern
}

// NewFeatureConfig creates a new FeatureConfig with defaults.
func NewFeatureConfig() *FeatureConfig {
    return &FeatureConfig{}
}

// AddFlags registers feature gate configuration flags.
// Delegates to the Kubernetes feature gate system.
func (c *FeatureConfig) AddFlags(fs *pflag.FlagSet) {
    utilfeature.DefaultMutableFeatureGate.AddFlag(fs)
}
@@ -0,0 +1,42 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    utillog "github.com/kubevela/pkg/util/log"
    "github.com/spf13/pflag"
)

// KLogConfig contains klog configuration.
// This wraps the Kubernetes logging configuration.
type KLogConfig struct {
    // Reference to observability config for log settings
    observability *ObservabilityConfig
}

// NewKLogConfig creates a new KLogConfig.
func NewKLogConfig(observability *ObservabilityConfig) *KLogConfig {
    return &KLogConfig{
        observability: observability,
    }
}

// AddFlags registers klog configuration flags.
func (c *KLogConfig) AddFlags(fs *pflag.FlagSet) {
    // Add base klog flags
    utillog.AddFlags(fs)
}
@@ -0,0 +1,49 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "time"

    "github.com/spf13/pflag"
)

// KubernetesConfig contains Kubernetes API client configuration.
type KubernetesConfig struct {
    QPS                float64
    Burst              int
    InformerSyncPeriod time.Duration
}

// NewKubernetesConfig creates a new KubernetesConfig with defaults.
func NewKubernetesConfig() *KubernetesConfig {
    return &KubernetesConfig{
        QPS:                50,
        Burst:              100,
        InformerSyncPeriod: 10 * time.Hour,
    }
}

// AddFlags registers Kubernetes configuration flags.
func (c *KubernetesConfig) AddFlags(fs *pflag.FlagSet) {
    fs.Float64Var(&c.QPS, "kube-api-qps", c.QPS,
        "the qps for reconcile clients. Low qps may lead to low throughput. High qps may give stress to api-server. Raise this value if concurrent-reconciles is set to be high.")
    fs.IntVar(&c.Burst, "kube-api-burst", c.Burst,
        "the burst for reconcile clients. Recommend setting it qps*2.")
    fs.DurationVar(&c.InformerSyncPeriod, "informer-sync-period", c.InformerSyncPeriod,
        "The re-sync period for informer in controller-runtime. This is a system-level configuration.")
}
@@ -0,0 +1,53 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "time"

    pkgmulticluster "github.com/kubevela/pkg/multicluster"
    "github.com/spf13/pflag"
)

// MultiClusterConfig contains multi-cluster configuration.
type MultiClusterConfig struct {
    EnableClusterGateway   bool
    EnableClusterMetrics   bool
    ClusterMetricsInterval time.Duration
}

// NewMultiClusterConfig creates a new MultiClusterConfig with defaults.
func NewMultiClusterConfig() *MultiClusterConfig {
    return &MultiClusterConfig{
        EnableClusterGateway:   false,
        EnableClusterMetrics:   false,
        ClusterMetricsInterval: 15 * time.Second,
    }
}

// AddFlags registers multi-cluster configuration flags.
func (c *MultiClusterConfig) AddFlags(fs *pflag.FlagSet) {
    fs.BoolVar(&c.EnableClusterGateway, "enable-cluster-gateway", c.EnableClusterGateway,
        "Enable cluster-gateway to use multicluster, disabled by default.")
    fs.BoolVar(&c.EnableClusterMetrics, "enable-cluster-metrics", c.EnableClusterMetrics,
        "Enable cluster-metrics-management to collect metrics from clusters with cluster-gateway, disabled by default. When this param is enabled, enable-cluster-gateway should be enabled")
    fs.DurationVar(&c.ClusterMetricsInterval, "cluster-metrics-interval", c.ClusterMetricsInterval,
        "The interval that ClusterMetricsMgr will collect metrics from clusters, default value is 15 seconds.")

    // Also register additional multicluster flags from external package
    pkgmulticluster.AddFlags(fs)
}
@@ -0,0 +1,54 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/spf13/pflag"

    "github.com/oam-dev/kubevela/pkg/oam"
)

// OAMConfig contains OAM-specific configuration.
type OAMConfig struct {
    SystemDefinitionNamespace string
}

// NewOAMConfig creates a new OAMConfig with defaults.
func NewOAMConfig() *OAMConfig {
    return &OAMConfig{
        SystemDefinitionNamespace: "vela-system",
    }
}

// AddFlags registers OAM configuration flags.
func (c *OAMConfig) AddFlags(fs *pflag.FlagSet) {
    fs.StringVar(&c.SystemDefinitionNamespace,
        "system-definition-namespace",
        c.SystemDefinitionNamespace,
        "Define the namespace of the system-level definition")
}

// SyncToOAMGlobals syncs the parsed configuration values to OAM package global variables.
// This should be called after flag parsing to ensure the OAM runtime uses the configured values.
//
// NOTE: This method exists for backward compatibility with legacy code that depends on global
// variables in the oam package. Ideally, configuration should be injected rather than using globals.
//
// The flow is: CLI flags -> OAMConfig struct fields -> oam globals (via this method)
func (c *OAMConfig) SyncToOAMGlobals() {
    oam.SystemDefinitionNamespace = c.SystemDefinitionNamespace
}
@@ -0,0 +1,55 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/spf13/pflag"
)

// ObservabilityConfig contains metrics and logging configuration.
type ObservabilityConfig struct {
    MetricsAddr    string
    LogFilePath    string
    LogFileMaxSize uint64
    LogDebug       bool
    DevLogs        bool
}

// NewObservabilityConfig creates a new ObservabilityConfig with defaults.
func NewObservabilityConfig() *ObservabilityConfig {
    return &ObservabilityConfig{
        MetricsAddr:    ":8080",
        LogFilePath:    "",
        LogFileMaxSize: 1024,
        LogDebug:       false,
        DevLogs:        false,
    }
}

// AddFlags registers observability configuration flags.
func (c *ObservabilityConfig) AddFlags(fs *pflag.FlagSet) {
    fs.StringVar(&c.MetricsAddr, "metrics-addr", c.MetricsAddr,
        "The address the metric endpoint binds to.")
    fs.StringVar(&c.LogFilePath, "log-file-path", c.LogFilePath,
        "The file to write logs to.")
    fs.Uint64Var(&c.LogFileMaxSize, "log-file-max-size", c.LogFileMaxSize,
        "Defines the maximum size a log file can grow to, Unit is megabytes.")
    fs.BoolVar(&c.LogDebug, "log-debug", c.LogDebug,
        "Enable debug logs for development purpose")
    fs.BoolVar(&c.DevLogs, "dev-logs", c.DevLogs,
        "Enable ANSI color formatting for console logs (ignored when log-file-path is set)")
}
@@ -0,0 +1,58 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/spf13/pflag"

    standardcontroller "github.com/oam-dev/kubevela/pkg/controller"
    commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
)

// PerformanceConfig contains performance and optimization configuration.
type PerformanceConfig struct {
    PerfEnabled bool
}

// NewPerformanceConfig creates a new PerformanceConfig with defaults.
func NewPerformanceConfig() *PerformanceConfig {
    return &PerformanceConfig{
        PerfEnabled: commonconfig.PerfEnabled,
    }
}

// AddFlags registers performance configuration flags.
func (c *PerformanceConfig) AddFlags(fs *pflag.FlagSet) {
    fs.BoolVar(&c.PerfEnabled,
        "perf-enabled",
        c.PerfEnabled,
        "Enable performance logging for controllers, disabled by default.")

    // Add optimization flags from the standard controller
    standardcontroller.AddOptimizeFlags(fs)
}

// SyncToPerformanceGlobals syncs the parsed configuration values to performance package global variables.
// This should be called after flag parsing to ensure the performance monitoring uses the configured values.
//
// NOTE: This method exists for backward compatibility with legacy code that depends on global
// variables in the commonconfig package. Ideally, configuration should be injected rather than using globals.
//
// The flow is: CLI flags -> PerformanceConfig struct fields -> commonconfig globals (via this method)
func (c *PerformanceConfig) SyncToPerformanceGlobals() {
    commonconfig.PerfEnabled = c.PerfEnabled
}
@@ -0,0 +1,40 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/kubevela/pkg/util/profiling"
    "github.com/spf13/pflag"
)

// ProfilingConfig contains profiling configuration.
// This wraps the external package's profiling configuration flags.
type ProfilingConfig struct {
    // Note: The actual configuration is managed by the profiling package
    // This is a wrapper to maintain consistency with our config pattern
}

// NewProfilingConfig creates a new ProfilingConfig with defaults.
func NewProfilingConfig() *ProfilingConfig {
    return &ProfilingConfig{}
}

// AddFlags registers profiling configuration flags.
// Delegates to the external package's flag registration.
func (c *ProfilingConfig) AddFlags(fs *pflag.FlagSet) {
    profiling.AddFlags(fs)
}
@@ -0,0 +1,40 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    ctrlrec "github.com/kubevela/pkg/controller/reconciler"
    "github.com/spf13/pflag"
)

// ReconcileConfig contains controller reconciliation configuration.
// This wraps the external package's reconciler configuration flags.
type ReconcileConfig struct {
    // Note: The actual configuration is managed by the ctrlrec package
    // This is a wrapper to maintain consistency with our config pattern
}

// NewReconcileConfig creates a new ReconcileConfig with defaults.
func NewReconcileConfig() *ReconcileConfig {
    return &ReconcileConfig{}
}

// AddFlags registers reconcile configuration flags.
// Delegates to the external package's flag registration.
func (c *ReconcileConfig) AddFlags(fs *pflag.FlagSet) {
    ctrlrec.AddFlags(fs)
}
@@ -0,0 +1,55 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/spf13/pflag"

    "github.com/oam-dev/kubevela/pkg/resourcekeeper"
)

// ResourceConfig contains resource management configuration.
type ResourceConfig struct {
    MaxDispatchConcurrent int
}

// NewResourceConfig creates a new ResourceConfig with defaults.
func NewResourceConfig() *ResourceConfig {
    return &ResourceConfig{
        MaxDispatchConcurrent: 10,
    }
}

// AddFlags registers resource configuration flags.
func (c *ResourceConfig) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&c.MaxDispatchConcurrent,
        "max-dispatch-concurrent",
        c.MaxDispatchConcurrent,
        "Set the max dispatch concurrent number, default is 10")
}

// SyncToResourceGlobals syncs the parsed configuration values to resource package global variables.
// This should be called after flag parsing to ensure the resource keeper uses the configured values.
//
// NOTE: This method exists for backward compatibility with legacy code that depends on global
// variables in the resourcekeeper package. The long-term goal should be to refactor to use
// dependency injection rather than globals.
//
// The flow is: CLI flags -> ResourceConfig struct fields -> resourcekeeper globals (via this method)
func (c *ResourceConfig) SyncToResourceGlobals() {
    resourcekeeper.MaxDispatchConcurrent = c.MaxDispatchConcurrent
}
@@ -0,0 +1,65 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "time"

    "github.com/spf13/pflag"
)

// ServerConfig contains server-level configuration.
type ServerConfig struct {
    HealthAddr              string
    StorageDriver           string
    EnableLeaderElection    bool
    LeaderElectionNamespace string
    LeaseDuration           time.Duration
    RenewDeadline           time.Duration
    RetryPeriod             time.Duration
}

// NewServerConfig creates a new ServerConfig with defaults.
func NewServerConfig() *ServerConfig {
    return &ServerConfig{
        HealthAddr:              ":9440",
        StorageDriver:           "Local",
        EnableLeaderElection:    false,
        LeaderElectionNamespace: "",
        LeaseDuration:           15 * time.Second,
        RenewDeadline:           10 * time.Second,
        RetryPeriod:             2 * time.Second,
    }
}

// AddFlags registers server configuration flags.
func (c *ServerConfig) AddFlags(fs *pflag.FlagSet) {
    fs.StringVar(&c.HealthAddr, "health-addr", c.HealthAddr,
        "The address the health endpoint binds to.")
    fs.StringVar(&c.StorageDriver, "storage-driver", c.StorageDriver,
        "Application storage driver.")
    fs.BoolVar(&c.EnableLeaderElection, "enable-leader-election", c.EnableLeaderElection,
        "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
    fs.StringVar(&c.LeaderElectionNamespace, "leader-election-namespace", c.LeaderElectionNamespace,
        "Determines the namespace in which the leader election configmap will be created.")
    fs.DurationVar(&c.LeaseDuration, "leader-election-lease-duration", c.LeaseDuration,
        "The duration that non-leader candidates will wait to force acquire leadership")
    fs.DurationVar(&c.RenewDeadline, "leader-election-renew-deadline", c.RenewDeadline,
        "The duration that the acting controlplane will retry refreshing leadership before giving up")
    fs.DurationVar(&c.RetryPeriod, "leader-election-retry-period", c.RetryPeriod,
        "The duration the LeaderElector clients should wait between tries of actions")
}
@@ -0,0 +1,40 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/kubevela/pkg/controller/sharding"
    "github.com/spf13/pflag"
)

// ShardingConfig contains controller sharding configuration.
// This wraps the external package's sharding configuration flags.
type ShardingConfig struct {
    // Note: The actual configuration is managed by the sharding package
    // This is a wrapper to maintain consistency with our config pattern
}

// NewShardingConfig creates a new ShardingConfig with defaults.
func NewShardingConfig() *ShardingConfig {
    return &ShardingConfig{}
}

// AddFlags registers sharding configuration flags.
// Delegates to the external package's flag registration.
func (c *ShardingConfig) AddFlags(fs *pflag.FlagSet) {
    sharding.AddFlags(fs)
}
@@ -0,0 +1,47 @@
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/spf13/pflag"
)

// WebhookConfig contains webhook configuration.
type WebhookConfig struct {
    UseWebhook  bool
    CertDir     string
    WebhookPort int
}

// NewWebhookConfig creates a new WebhookConfig with defaults.
func NewWebhookConfig() *WebhookConfig {
    return &WebhookConfig{
        UseWebhook:  false,
        CertDir:     "/k8s-webhook-server/serving-certs",
        WebhookPort: 9443,
    }
}

// AddFlags registers webhook configuration flags.
func (c *WebhookConfig) AddFlags(fs *pflag.FlagSet) {
    fs.BoolVar(&c.UseWebhook, "use-webhook", c.UseWebhook,
        "Enable Admission Webhook")
    fs.StringVar(&c.CertDir, "webhook-cert-dir", c.CertDir,
        "Admission webhook cert/key dir.")
    fs.IntVar(&c.WebhookPort, "webhook-port", c.WebhookPort,
        "admission webhook listen address")
}
@ -0,0 +1,69 @@
|
|||
/*
Copyright 2025 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "github.com/spf13/pflag"

    wfTypes "github.com/kubevela/workflow/pkg/types"
)

// WorkflowConfig contains workflow engine configuration.
type WorkflowConfig struct {
    MaxWaitBackoffTime     int
    MaxFailedBackoffTime   int
    MaxStepErrorRetryTimes int
}

// NewWorkflowConfig creates a new WorkflowConfig with defaults.
func NewWorkflowConfig() *WorkflowConfig {
    return &WorkflowConfig{
        MaxWaitBackoffTime:     60,
        MaxFailedBackoffTime:   300,
        MaxStepErrorRetryTimes: 10,
    }
}

// AddFlags registers workflow configuration flags.
func (c *WorkflowConfig) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&c.MaxWaitBackoffTime,
        "max-workflow-wait-backoff-time",
        c.MaxWaitBackoffTime,
        "Set the max workflow wait backoff time, default is 60")
    fs.IntVar(&c.MaxFailedBackoffTime,
        "max-workflow-failed-backoff-time",
        c.MaxFailedBackoffTime,
        "Set the max workflow failed backoff time, default is 300")
    fs.IntVar(&c.MaxStepErrorRetryTimes,
        "max-workflow-step-error-retry-times",
        c.MaxStepErrorRetryTimes,
        "Set the max workflow step error retry times, default is 10")
}

// SyncToWorkflowGlobals syncs the parsed configuration values to workflow package global variables.
// This should be called after flag parsing to ensure the workflow engine uses the configured values.
//
// NOTE: This method exists for backward compatibility with legacy code that depends on global
// variables in the wfTypes package. The long-term goal should be to refactor the workflow
// package to accept configuration via dependency injection rather than globals.
//
// The flow is: CLI flags -> WorkflowConfig struct fields -> wfTypes globals (via this method)
func (c *WorkflowConfig) SyncToWorkflowGlobals() {
    wfTypes.MaxWorkflowWaitBackoffTime = c.MaxWaitBackoffTime
    wfTypes.MaxWorkflowFailedBackoffTime = c.MaxFailedBackoffTime
    wfTypes.MaxWorkflowStepErrorRetryTimes = c.MaxStepErrorRetryTimes
}
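For reference, the call order described in the NOTE above can be sketched as follows. This is an illustrative sketch only, not part of the change; it only uses symbols that appear in this diff (NewWorkflowConfig, AddFlags, SyncToWorkflowGlobals) and assumes the config package import path shown later in options.go.

package main

import (
    "os"

    "github.com/spf13/pflag"

    // import path as used by options.go in this diff
    "github.com/oam-dev/kubevela/cmd/core/app/config"
)

func main() {
    fs := pflag.NewFlagSet("vela-core", pflag.ExitOnError)

    wf := config.NewWorkflowConfig() // defaults: 60 / 300 / 10
    wf.AddFlags(fs)                  // registers the three --max-workflow-* flags

    _ = fs.Parse(os.Args[1:])  // CLI flags -> WorkflowConfig struct fields
    wf.SyncToWorkflowGlobals() // struct fields -> wfTypes globals (legacy compatibility)
}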
@ -17,88 +17,81 @@ limitations under the License.

package options

import (
    "strconv"
    "time"

    "github.com/kubevela/pkg/cue/cuex"

    pkgclient "github.com/kubevela/pkg/controller/client"
    ctrlrec "github.com/kubevela/pkg/controller/reconciler"
    "github.com/kubevela/pkg/controller/sharding"
    pkgmulticluster "github.com/kubevela/pkg/multicluster"
    utillog "github.com/kubevela/pkg/util/log"
    "github.com/kubevela/pkg/util/profiling"
    wfTypes "github.com/kubevela/workflow/pkg/types"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    cliflag "k8s.io/component-base/cli/flag"

    standardcontroller "github.com/oam-dev/kubevela/pkg/controller"
    commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
    oamcontroller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
    "github.com/oam-dev/kubevela/pkg/oam"
    "github.com/oam-dev/kubevela/pkg/resourcekeeper"
    "github.com/oam-dev/kubevela/cmd/core/app/config"
)

// CoreOptions contains everything necessary to create and run vela-core
type CoreOptions struct {
    UseWebhook              bool
    CertDir                 string
    WebhookPort             int
    MetricsAddr             string
    EnableLeaderElection    bool
    LeaderElectionNamespace string
    LogFilePath             string
    LogFileMaxSize          uint64
    LogDebug                bool
    DevLogs                 bool
    ControllerArgs          *oamcontroller.Args
    HealthAddr              string
    StorageDriver           string
    InformerSyncPeriod      time.Duration
    QPS                     float64
    Burst                   int
    LeaseDuration           time.Duration
    RenewDeadLine           time.Duration
    RetryPeriod             time.Duration
    EnableClusterGateway    bool
    EnableClusterMetrics    bool
    ClusterMetricsInterval  time.Duration
    // Config modules - clean, well-organized configuration
    Server        *config.ServerConfig
    Webhook       *config.WebhookConfig
    Observability *config.ObservabilityConfig
    Kubernetes    *config.KubernetesConfig
    MultiCluster  *config.MultiClusterConfig
    CUE           *config.CUEConfig
    Application   *config.ApplicationConfig
    OAM           *config.OAMConfig
    Performance   *config.PerformanceConfig
    Workflow      *config.WorkflowConfig
    Admission     *config.AdmissionConfig
    Resource      *config.ResourceConfig
    Client        *config.ClientConfig
    Reconcile     *config.ReconcileConfig
    Sharding      *config.ShardingConfig
    Feature       *config.FeatureConfig
    Profiling     *config.ProfilingConfig
    KLog          *config.KLogConfig
    Controller    *config.ControllerConfig
}

// NewCoreOptions creates a new NewVelaCoreOptions object with default parameters
func NewCoreOptions() *CoreOptions {
    // Initialize config modules
    server := config.NewServerConfig()
    webhook := config.NewWebhookConfig()
    observability := config.NewObservabilityConfig()
    kubernetes := config.NewKubernetesConfig()
    multiCluster := config.NewMultiClusterConfig()
    cue := config.NewCUEConfig()
    application := config.NewApplicationConfig()
    oam := config.NewOAMConfig()
    performance := config.NewPerformanceConfig()
    workflow := config.NewWorkflowConfig()
    admission := config.NewAdmissionConfig()
    resource := config.NewResourceConfig()
    client := config.NewClientConfig()
    reconcile := config.NewReconcileConfig()
    sharding := config.NewShardingConfig()
    feature := config.NewFeatureConfig()
    profiling := config.NewProfilingConfig()
    klog := config.NewKLogConfig(observability)
    controller := config.NewControllerConfig()

    s := &CoreOptions{
        UseWebhook:              false,
        CertDir:                 "/k8s-webhook-server/serving-certs",
        WebhookPort:             9443,
        MetricsAddr:             ":8080",
        EnableLeaderElection:    false,
        LeaderElectionNamespace: "",
        LogFilePath:             "",
        LogFileMaxSize:          1024,
        LogDebug:                false,
        DevLogs:                 false,
        ControllerArgs: &oamcontroller.Args{
            RevisionLimit:             50,
            AppRevisionLimit:          10,
            DefRevisionLimit:          20,
            AutoGenWorkloadDefinition: true,
            ConcurrentReconciles:      4,
            IgnoreAppWithoutControllerRequirement:        false,
            IgnoreDefinitionWithoutControllerRequirement: false,
        },
        HealthAddr:             ":9440",
        StorageDriver:          "Local",
        InformerSyncPeriod:     10 * time.Hour,
        QPS:                    50,
        Burst:                  100,
        LeaseDuration:          15 * time.Second,
        RenewDeadLine:          10 * time.Second,
        RetryPeriod:            2 * time.Second,
        EnableClusterGateway:   false,
        EnableClusterMetrics:   false,
        ClusterMetricsInterval: 15 * time.Second,
        // Config modules
        Server:        server,
        Webhook:       webhook,
        Observability: observability,
        Kubernetes:    kubernetes,
        MultiCluster:  multiCluster,
        CUE:           cue,
        Application:   application,
        OAM:           oam,
        Performance:   performance,
        Workflow:      workflow,
        Admission:     admission,
        Resource:      resource,
        Client:        client,
        Reconcile:     reconcile,
        Sharding:      sharding,
        Feature:       feature,
        Profiling:     profiling,
        KLog:          klog,
        Controller:    controller,
    }

    return s
}

@ -106,75 +99,28 @@ func NewCoreOptions() {
func (s *CoreOptions) Flags() cliflag.NamedFlagSets {
    fss := cliflag.NamedFlagSets{}

    gfs := fss.FlagSet("generic")
    gfs.BoolVar(&s.UseWebhook, "use-webhook", s.UseWebhook, "Enable Admission Webhook")
    gfs.StringVar(&s.CertDir, "webhook-cert-dir", s.CertDir, "Admission webhook cert/key dir.")
    gfs.IntVar(&s.WebhookPort, "webhook-port", s.WebhookPort, "admission webhook listen address")
    gfs.StringVar(&s.MetricsAddr, "metrics-addr", s.MetricsAddr, "The address the metric endpoint binds to.")
    gfs.BoolVar(&s.EnableLeaderElection, "enable-leader-election", s.EnableLeaderElection,
        "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
    gfs.StringVar(&s.LeaderElectionNamespace, "leader-election-namespace", s.LeaderElectionNamespace,
        "Determines the namespace in which the leader election configmap will be created.")
    gfs.StringVar(&s.LogFilePath, "log-file-path", s.LogFilePath, "The file to write logs to.")
    gfs.Uint64Var(&s.LogFileMaxSize, "log-file-max-size", s.LogFileMaxSize, "Defines the maximum size a log file can grow to, Unit is megabytes.")
    gfs.BoolVar(&s.LogDebug, "log-debug", s.LogDebug, "Enable debug logs for development purpose")
    gfs.BoolVar(&s.DevLogs, "dev-logs", s.DevLogs, "Enable ANSI color formatting for console logs (ignored when log-file-path is set)")
    gfs.StringVar(&s.HealthAddr, "health-addr", s.HealthAddr, "The address the health endpoint binds to.")
    gfs.DurationVar(&s.InformerSyncPeriod, "informer-sync-period", s.InformerSyncPeriod,
        "The re-sync period for informer in controller-runtime. This is a system-level configuration.")
    gfs.Float64Var(&s.QPS, "kube-api-qps", s.QPS, "the qps for reconcile clients. Low qps may lead to low throughput. High qps may give stress to api-server. Raise this value if concurrent-reconciles is set to be high.")
    gfs.IntVar(&s.Burst, "kube-api-burst", s.Burst, "the burst for reconcile clients. Recommend setting it qps*2.")
    gfs.DurationVar(&s.LeaseDuration, "leader-election-lease-duration", s.LeaseDuration,
        "The duration that non-leader candidates will wait to force acquire leadership")
    gfs.DurationVar(&s.RenewDeadLine, "leader-election-renew-deadline", s.RenewDeadLine,
        "The duration that the acting controlplane will retry refreshing leadership before giving up")
    gfs.DurationVar(&s.RetryPeriod, "leader-election-retry-period", s.RetryPeriod,
        "The duration the LeaderElector clients should wait between tries of actions")
    gfs.BoolVar(&s.EnableClusterGateway, "enable-cluster-gateway", s.EnableClusterGateway, "Enable cluster-gateway to use multicluster, disabled by default.")
    gfs.BoolVar(&s.EnableClusterMetrics, "enable-cluster-metrics", s.EnableClusterMetrics, "Enable cluster-metrics-management to collect metrics from clusters with cluster-gateway, disabled by default. When this param is enabled, enable-cluster-gateway should be enabled")
    gfs.DurationVar(&s.ClusterMetricsInterval, "cluster-metrics-interval", s.ClusterMetricsInterval, "The interval that ClusterMetricsMgr will collect metrics from clusters, default value is 15 seconds.")
    gfs.BoolVar(&cuex.EnableExternalPackageForDefaultCompiler, "enable-external-package-for-default-compiler", cuex.EnableExternalPackageForDefaultCompiler, "Enable external package for default compiler")
    gfs.BoolVar(&cuex.EnableExternalPackageWatchForDefaultCompiler, "enable-external-package-watch-for-default-compiler", cuex.EnableExternalPackageWatchForDefaultCompiler, "Enable external package watch for default compiler")
    // Use config modules to register flags - clean delegation pattern
    s.Server.AddFlags(fss.FlagSet("server"))
    s.Webhook.AddFlags(fss.FlagSet("webhook"))
    s.Observability.AddFlags(fss.FlagSet("observability"))
    s.Kubernetes.AddFlags(fss.FlagSet("kubernetes"))
    s.MultiCluster.AddFlags(fss.FlagSet("multicluster"))
    s.CUE.AddFlags(fss.FlagSet("cue"))
    s.Application.AddFlags(fss.FlagSet("application"))
    s.OAM.AddFlags(fss.FlagSet("oam"))
    s.Performance.AddFlags(fss.FlagSet("performance"))
    s.Admission.AddFlags(fss.FlagSet("admission"))
    s.Resource.AddFlags(fss.FlagSet("resource"))
    s.Workflow.AddFlags(fss.FlagSet("workflow"))
    s.Controller.AddFlags(fss.FlagSet("controller"))

    s.ControllerArgs.AddFlags(fss.FlagSet("controllerArgs"), s.ControllerArgs)

    cfs := fss.FlagSet("commonconfig")
    cfs.DurationVar(&commonconfig.ApplicationReSyncPeriod, "application-re-sync-period", commonconfig.ApplicationReSyncPeriod,
        "Re-sync period for application to re-sync, also known as the state-keep interval.")
    cfs.BoolVar(&commonconfig.PerfEnabled, "perf-enabled", commonconfig.PerfEnabled, "Enable performance logging for controllers, disabled by default.")

    ofs := fss.FlagSet("oam")
    ofs.StringVar(&oam.SystemDefinitionNamespace, "system-definition-namespace", "vela-system", "define the namespace of the system-level definition")

    standardcontroller.AddOptimizeFlags(fss.FlagSet("optimize"))
    standardcontroller.AddAdmissionFlags(fss.FlagSet("admission"))

    rfs := fss.FlagSet("resourcekeeper")
    rfs.IntVar(&resourcekeeper.MaxDispatchConcurrent, "max-dispatch-concurrent", 10, "Set the max dispatch concurrent number, default is 10")

    wfs := fss.FlagSet("wfTypes")
    wfs.IntVar(&wfTypes.MaxWorkflowWaitBackoffTime, "max-workflow-wait-backoff-time", 60, "Set the max workflow wait backoff time, default is 60")
    wfs.IntVar(&wfTypes.MaxWorkflowFailedBackoffTime, "max-workflow-failed-backoff-time", 300, "Set the max workflow failed backoff time, default is 300")
    wfs.IntVar(&wfTypes.MaxWorkflowStepErrorRetryTimes, "max-workflow-step-error-retry-times", 10, "Set the max workflow step error retry times, default is 10")

    pkgmulticluster.AddFlags(fss.FlagSet("multicluster"))
    ctrlrec.AddFlags(fss.FlagSet("controllerreconciles"))
    utilfeature.DefaultMutableFeatureGate.AddFlag(fss.FlagSet("featuregate"))
    sharding.AddFlags(fss.FlagSet("sharding"))
    kfs := fss.FlagSet("klog")
    pkgclient.AddTimeoutControllerClientFlags(fss.FlagSet("controllerclient"))
    utillog.AddFlags(kfs)
    profiling.AddFlags(fss.FlagSet("profiling"))

    if s.LogDebug {
        _ = kfs.Set("v", strconv.Itoa(int(commonconfig.LogDebug)))
    }

    if s.LogFilePath != "" {
        _ = kfs.Set("logtostderr", "false")
        _ = kfs.Set("log_file", s.LogFilePath)
        _ = kfs.Set("log_file_max_size", strconv.FormatUint(s.LogFileMaxSize, 10))
    }
    // External package configurations (now wrapped in config modules)
    s.Client.AddFlags(fss.FlagSet("client"))
    s.Reconcile.AddFlags(fss.FlagSet("reconcile"))
    s.Sharding.AddFlags(fss.FlagSet("sharding"))
    s.Feature.AddFlags(fss.FlagSet("feature"))
    s.Profiling.AddFlags(fss.FlagSet("profiling"))
    s.KLog.AddFlags(fss.FlagSet("klog"))

    return fss
}

@ -20,104 +20,915 @@ import (
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/kubevela/pkg/cue/cuex"
|
||||
wfTypes "github.com/kubevela/workflow/pkg/types"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
oamcontroller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
|
||||
commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
|
||||
)
|
||||
|
||||
func TestCoreOptions_Flags(t *testing.T) {
|
||||
func TestNewCoreOptions_DefaultValues(t *testing.T) {
|
||||
opt := NewCoreOptions()
|
||||
|
||||
// Test Server defaults
|
||||
assert.Equal(t, ":9440", opt.Server.HealthAddr)
|
||||
assert.Equal(t, "Local", opt.Server.StorageDriver)
|
||||
assert.Equal(t, false, opt.Server.EnableLeaderElection)
|
||||
assert.Equal(t, "", opt.Server.LeaderElectionNamespace)
|
||||
assert.Equal(t, 15*time.Second, opt.Server.LeaseDuration)
|
||||
assert.Equal(t, 10*time.Second, opt.Server.RenewDeadline)
|
||||
assert.Equal(t, 2*time.Second, opt.Server.RetryPeriod)
|
||||
|
||||
// Test Webhook defaults
|
||||
assert.Equal(t, false, opt.Webhook.UseWebhook)
|
||||
assert.Equal(t, "/k8s-webhook-server/serving-certs", opt.Webhook.CertDir)
|
||||
assert.Equal(t, 9443, opt.Webhook.WebhookPort)
|
||||
|
||||
// Test Observability defaults
|
||||
assert.Equal(t, ":8080", opt.Observability.MetricsAddr)
|
||||
assert.Equal(t, false, opt.Observability.LogDebug)
|
||||
assert.Equal(t, "", opt.Observability.LogFilePath)
|
||||
assert.Equal(t, uint64(1024), opt.Observability.LogFileMaxSize)
|
||||
|
||||
// Test Kubernetes defaults
|
||||
assert.Equal(t, 10*time.Hour, opt.Kubernetes.InformerSyncPeriod)
|
||||
assert.Equal(t, float64(50), opt.Kubernetes.QPS)
|
||||
assert.Equal(t, 100, opt.Kubernetes.Burst)
|
||||
|
||||
// Test MultiCluster defaults
|
||||
assert.Equal(t, false, opt.MultiCluster.EnableClusterGateway)
|
||||
assert.Equal(t, false, opt.MultiCluster.EnableClusterMetrics)
|
||||
assert.Equal(t, 15*time.Second, opt.MultiCluster.ClusterMetricsInterval)
|
||||
|
||||
// Test CUE defaults
|
||||
assert.NotNil(t, opt.CUE)
|
||||
|
||||
// Test Application defaults
|
||||
assert.Equal(t, 5*time.Minute, opt.Application.ReSyncPeriod)
|
||||
|
||||
// Test OAM defaults
|
||||
assert.Equal(t, "vela-system", opt.OAM.SystemDefinitionNamespace)
|
||||
|
||||
// Test Performance defaults
|
||||
assert.Equal(t, false, opt.Performance.PerfEnabled)
|
||||
|
||||
// Test Controller defaults
|
||||
assert.Equal(t, 50, opt.Controller.RevisionLimit)
|
||||
assert.Equal(t, 10, opt.Controller.AppRevisionLimit)
|
||||
assert.Equal(t, 20, opt.Controller.DefRevisionLimit)
|
||||
assert.Equal(t, true, opt.Controller.AutoGenWorkloadDefinition)
|
||||
assert.Equal(t, 4, opt.Controller.ConcurrentReconciles)
|
||||
assert.Equal(t, false, opt.Controller.IgnoreAppWithoutControllerRequirement)
|
||||
assert.Equal(t, false, opt.Controller.IgnoreDefinitionWithoutControllerRequirement)
|
||||
|
||||
// Test Workflow defaults
|
||||
assert.Equal(t, 60, opt.Workflow.MaxWaitBackoffTime)
|
||||
assert.Equal(t, 300, opt.Workflow.MaxFailedBackoffTime)
|
||||
assert.Equal(t, 10, opt.Workflow.MaxStepErrorRetryTimes)
|
||||
|
||||
// Test Resource defaults
|
||||
assert.Equal(t, 10, opt.Resource.MaxDispatchConcurrent)
|
||||
|
||||
// Ensure all config modules are initialized
|
||||
assert.NotNil(t, opt.Admission)
|
||||
assert.NotNil(t, opt.Client)
|
||||
assert.NotNil(t, opt.Reconcile)
|
||||
assert.NotNil(t, opt.Sharding)
|
||||
assert.NotNil(t, opt.Feature)
|
||||
assert.NotNil(t, opt.Profiling)
|
||||
assert.NotNil(t, opt.KLog)
|
||||
assert.NotNil(t, opt.Controller)
|
||||
}
|
||||
|
||||
func TestCoreOptions_FlagsCompleteSet(t *testing.T) {
|
||||
fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
|
||||
opt := &CoreOptions{
|
||||
ControllerArgs: &oamcontroller.Args{},
|
||||
}
|
||||
opt := NewCoreOptions()
|
||||
|
||||
for _, f := range opt.Flags().FlagSets {
|
||||
fs.AddFlagSet(f)
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"--application-re-sync-period=5s",
|
||||
"--cluster-metrics-interval=5s",
|
||||
"--enable-cluster-gateway=true",
|
||||
"--enable-cluster-metrics=true",
|
||||
"--enable-leader-election=true",
|
||||
// Server flags
|
||||
"--health-addr=/healthz",
|
||||
"--informer-sync-period=3s",
|
||||
"--kube-api-burst=500",
|
||||
"--kube-api-qps=200",
|
||||
"--leader-election-lease-duration=3s",
|
||||
"--storage-driver=MongoDB",
|
||||
"--enable-leader-election=true",
|
||||
"--leader-election-namespace=test-namespace",
|
||||
"--leader-election-lease-duration=3s",
|
||||
"--leader-election-renew-deadline=5s",
|
||||
"--leader-election-retry-period=3s",
|
||||
"--log-debug=true",
|
||||
"--log-file-max-size=50",
|
||||
"--log-file-path=/path/to/log",
|
||||
"--max-dispatch-concurrent=5",
|
||||
"--max-workflow-failed-backoff-time=30",
|
||||
"--max-workflow-step-error-retry-times=5",
|
||||
"--max-workflow-wait-backoff-time=5",
|
||||
"--metrics-addr=/metrics",
|
||||
"--perf-enabled=true",
|
||||
// Webhook flags
|
||||
"--use-webhook=true",
|
||||
"--webhook-cert-dir=/path/to/cert",
|
||||
"--webhook-port=8080",
|
||||
// Observability flags
|
||||
"--metrics-addr=/metrics",
|
||||
"--log-debug=true",
|
||||
"--log-file-path=/path/to/log",
|
||||
"--log-file-max-size=50",
|
||||
// Kubernetes flags
|
||||
"--informer-sync-period=3s",
|
||||
"--kube-api-qps=200",
|
||||
"--kube-api-burst=500",
|
||||
// MultiCluster flags
|
||||
"--enable-cluster-gateway=true",
|
||||
"--enable-cluster-metrics=true",
|
||||
"--cluster-metrics-interval=5s",
|
||||
// CUE flags
|
||||
"--enable-external-package-for-default-compiler=true",
|
||||
"--enable-external-package-watch-for-default-compiler=true",
|
||||
// Application flags
|
||||
"--application-re-sync-period=5s",
|
||||
// OAM flags
|
||||
"--system-definition-namespace=custom-namespace",
|
||||
// Performance flags
|
||||
"--perf-enabled=true",
|
||||
// Controller flags
|
||||
"--revision-limit=100",
|
||||
"--application-revision-limit=20",
|
||||
"--definition-revision-limit=30",
|
||||
"--autogen-workload-definition=false",
|
||||
"--concurrent-reconciles=8",
|
||||
"--ignore-app-without-controller-version=true",
|
||||
"--ignore-definition-without-controller-version=true",
|
||||
// Workflow flags
|
||||
"--max-workflow-wait-backoff-time=30",
|
||||
"--max-workflow-failed-backoff-time=150",
|
||||
"--max-workflow-step-error-retry-times=5",
|
||||
// Resource flags
|
||||
"--max-dispatch-concurrent=5",
|
||||
}
|
||||
|
||||
if err := fs.Parse(args); err != nil {
|
||||
t.Errorf("Failed to parse args: %v", err)
|
||||
}
|
||||
err := fs.Parse(args)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := &CoreOptions{
|
||||
UseWebhook: true,
|
||||
CertDir: "/path/to/cert",
|
||||
WebhookPort: 8080,
|
||||
MetricsAddr: "/metrics",
|
||||
EnableLeaderElection: true,
|
||||
LeaderElectionNamespace: "test-namespace",
|
||||
LogFilePath: "/path/to/log",
|
||||
LogFileMaxSize: 50,
|
||||
LogDebug: true,
|
||||
ControllerArgs: &oamcontroller.Args{},
|
||||
HealthAddr: "/healthz",
|
||||
StorageDriver: "",
|
||||
InformerSyncPeriod: 3 * time.Second,
|
||||
QPS: 200,
|
||||
Burst: 500,
|
||||
LeaseDuration: 3 * time.Second,
|
||||
RenewDeadLine: 5 * time.Second,
|
||||
RetryPeriod: 3 * time.Second,
|
||||
EnableClusterGateway: true,
|
||||
EnableClusterMetrics: true,
|
||||
ClusterMetricsInterval: 5 * time.Second,
|
||||
}
|
||||
// Verify Server flags
|
||||
assert.Equal(t, "/healthz", opt.Server.HealthAddr)
|
||||
assert.Equal(t, "MongoDB", opt.Server.StorageDriver)
|
||||
assert.Equal(t, true, opt.Server.EnableLeaderElection)
|
||||
assert.Equal(t, "test-namespace", opt.Server.LeaderElectionNamespace)
|
||||
assert.Equal(t, 3*time.Second, opt.Server.LeaseDuration)
|
||||
assert.Equal(t, 5*time.Second, opt.Server.RenewDeadline)
|
||||
assert.Equal(t, 3*time.Second, opt.Server.RetryPeriod)
|
||||
|
||||
if !cmp.Equal(opt, expected, cmp.AllowUnexported(CoreOptions{})) {
|
||||
t.Errorf("Flags() diff: %v", cmp.Diff(opt, expected, cmp.AllowUnexported(CoreOptions{})))
|
||||
}
|
||||
// Verify Webhook flags
|
||||
assert.Equal(t, true, opt.Webhook.UseWebhook)
|
||||
assert.Equal(t, "/path/to/cert", opt.Webhook.CertDir)
|
||||
assert.Equal(t, 8080, opt.Webhook.WebhookPort)
|
||||
|
||||
// Verify Observability flags
|
||||
assert.Equal(t, "/metrics", opt.Observability.MetricsAddr)
|
||||
assert.Equal(t, true, opt.Observability.LogDebug)
|
||||
assert.Equal(t, "/path/to/log", opt.Observability.LogFilePath)
|
||||
assert.Equal(t, uint64(50), opt.Observability.LogFileMaxSize)
|
||||
|
||||
// Verify Kubernetes flags
|
||||
assert.Equal(t, 3*time.Second, opt.Kubernetes.InformerSyncPeriod)
|
||||
assert.Equal(t, float64(200), opt.Kubernetes.QPS)
|
||||
assert.Equal(t, 500, opt.Kubernetes.Burst)
|
||||
|
||||
// Verify MultiCluster flags
|
||||
assert.Equal(t, true, opt.MultiCluster.EnableClusterGateway)
|
||||
assert.Equal(t, true, opt.MultiCluster.EnableClusterMetrics)
|
||||
assert.Equal(t, 5*time.Second, opt.MultiCluster.ClusterMetricsInterval)
|
||||
|
||||
// Verify CUE flags
|
||||
assert.True(t, opt.CUE.EnableExternalPackage)
|
||||
assert.True(t, opt.CUE.EnableExternalPackageWatch)
|
||||
|
||||
// Verify Application flags
|
||||
assert.Equal(t, 5*time.Second, opt.Application.ReSyncPeriod)
|
||||
|
||||
// Verify OAM flags
|
||||
assert.Equal(t, "custom-namespace", opt.OAM.SystemDefinitionNamespace)
|
||||
|
||||
// Verify Performance flags
|
||||
assert.Equal(t, true, opt.Performance.PerfEnabled)
|
||||
|
||||
// Verify Controller flags
|
||||
assert.Equal(t, 100, opt.Controller.RevisionLimit)
|
||||
assert.Equal(t, 20, opt.Controller.AppRevisionLimit)
|
||||
assert.Equal(t, 30, opt.Controller.DefRevisionLimit)
|
||||
assert.Equal(t, false, opt.Controller.AutoGenWorkloadDefinition)
|
||||
assert.Equal(t, 8, opt.Controller.ConcurrentReconciles)
|
||||
assert.Equal(t, true, opt.Controller.IgnoreAppWithoutControllerRequirement)
|
||||
assert.Equal(t, true, opt.Controller.IgnoreDefinitionWithoutControllerRequirement)
|
||||
|
||||
// Verify Workflow flags
|
||||
assert.Equal(t, 30, opt.Workflow.MaxWaitBackoffTime)
|
||||
assert.Equal(t, 150, opt.Workflow.MaxFailedBackoffTime)
|
||||
assert.Equal(t, 5, opt.Workflow.MaxStepErrorRetryTimes)
|
||||
|
||||
// Verify Resource flags
|
||||
assert.Equal(t, 5, opt.Resource.MaxDispatchConcurrent)
|
||||
}
|
||||
|
||||
func TestCuexOptions_Flags(t *testing.T) {
    pflag.NewFlagSet("test", pflag.ContinueOnError)
func TestCuexOptions_SyncToGlobals(t *testing.T) {
    // Reset globals
    cuex.EnableExternalPackageForDefaultCompiler = false
    cuex.EnableExternalPackageWatchForDefaultCompiler = false

    opts := &CoreOptions{
        ControllerArgs: &oamcontroller.Args{},
    }
    opts := NewCoreOptions()
    fss := opts.Flags()

    args := []string{
        "--enable-external-package-for-default-compiler=true",
        "--enable-external-package-watch-for-default-compiler=true",
    }
    err := fss.FlagSet("generic").Parse(args)
    if err != nil {
        return

    err := fss.FlagSet("cue").Parse(args)
    require.NoError(t, err)

    // Before sync, globals should still be false
    assert.False(t, cuex.EnableExternalPackageForDefaultCompiler)
    assert.False(t, cuex.EnableExternalPackageWatchForDefaultCompiler)

    // After sync, globals should be updated
    opts.CUE.SyncToCUEGlobals()
    assert.True(t, cuex.EnableExternalPackageForDefaultCompiler)
    assert.True(t, cuex.EnableExternalPackageWatchForDefaultCompiler)
}

func TestWorkflowOptions_SyncToGlobals(t *testing.T) {
    // Store original values
    origWait := wfTypes.MaxWorkflowWaitBackoffTime
    origFailed := wfTypes.MaxWorkflowFailedBackoffTime
    origRetry := wfTypes.MaxWorkflowStepErrorRetryTimes

    // Restore after test
    defer func() {
        wfTypes.MaxWorkflowWaitBackoffTime = origWait
        wfTypes.MaxWorkflowFailedBackoffTime = origFailed
        wfTypes.MaxWorkflowStepErrorRetryTimes = origRetry
    }()

    opts := NewCoreOptions()
    fss := opts.Flags()

    args := []string{
        "--max-workflow-wait-backoff-time=120",
        "--max-workflow-failed-backoff-time=600",
        "--max-workflow-step-error-retry-times=20",
    }

    assert.True(t, cuex.EnableExternalPackageForDefaultCompiler, "The --enable-external-package-for-default-compiler flag should be enabled")
    assert.True(t, cuex.EnableExternalPackageWatchForDefaultCompiler, "The --enable-external-package-watch-for-default-compiler flag should be enabled")
    err := fss.FlagSet("workflow").Parse(args)
    require.NoError(t, err)

    // Verify struct fields are updated
    assert.Equal(t, 120, opts.Workflow.MaxWaitBackoffTime)
    assert.Equal(t, 600, opts.Workflow.MaxFailedBackoffTime)
    assert.Equal(t, 20, opts.Workflow.MaxStepErrorRetryTimes)

    // After sync, globals should be updated
    opts.Workflow.SyncToWorkflowGlobals()
    assert.Equal(t, 120, wfTypes.MaxWorkflowWaitBackoffTime)
    assert.Equal(t, 600, wfTypes.MaxWorkflowFailedBackoffTime)
    assert.Equal(t, 20, wfTypes.MaxWorkflowStepErrorRetryTimes)
}

func TestOAMOptions_SyncToGlobals(t *testing.T) {
    // Store original value
    origNamespace := oam.SystemDefinitionNamespace

    // Restore after test
    defer func() {
        oam.SystemDefinitionNamespace = origNamespace
    }()

    opts := NewCoreOptions()
    fss := opts.Flags()

    args := []string{
        "--system-definition-namespace=custom-system",
    }

    err := fss.FlagSet("oam").Parse(args)
    require.NoError(t, err)

    // Verify struct field is updated
    assert.Equal(t, "custom-system", opts.OAM.SystemDefinitionNamespace)

    // After sync, global should be updated
    opts.OAM.SyncToOAMGlobals()
    assert.Equal(t, "custom-system", oam.SystemDefinitionNamespace)
}

func TestPerformanceOptions_SyncToGlobals(t *testing.T) {
    // Store original value
    origPerf := commonconfig.PerfEnabled

    // Restore after test
    defer func() {
        commonconfig.PerfEnabled = origPerf
    }()

    opts := NewCoreOptions()
    fss := opts.Flags()

    args := []string{
        "--perf-enabled=true",
    }

    err := fss.FlagSet("performance").Parse(args)
    require.NoError(t, err)

    // Verify struct field is updated
    assert.Equal(t, true, opts.Performance.PerfEnabled)

    // After sync, global should be updated
    opts.Performance.SyncToPerformanceGlobals()
    assert.True(t, commonconfig.PerfEnabled)
}

func TestApplicationOptions_SyncToGlobals(t *testing.T) {
    // Store original value
    origPeriod := commonconfig.ApplicationReSyncPeriod

    // Restore after test
    defer func() {
        commonconfig.ApplicationReSyncPeriod = origPeriod
    }()

    opts := NewCoreOptions()
    fss := opts.Flags()

    args := []string{
        "--application-re-sync-period=10m",
    }

    err := fss.FlagSet("application").Parse(args)
    require.NoError(t, err)

    // Verify struct field is updated
    assert.Equal(t, 10*time.Minute, opts.Application.ReSyncPeriod)

    // After sync, global should be updated
    opts.Application.SyncToApplicationGlobals()
    assert.Equal(t, 10*time.Minute, commonconfig.ApplicationReSyncPeriod)
}

func TestResourceOptions_SyncToGlobals(t *testing.T) {
    // Store original value
    origDispatch := resourcekeeper.MaxDispatchConcurrent

    // Restore after test
    defer func() {
        resourcekeeper.MaxDispatchConcurrent = origDispatch
    }()

    opts := NewCoreOptions()
    fss := opts.Flags()

    args := []string{
        "--max-dispatch-concurrent=25",
    }

    err := fss.FlagSet("resource").Parse(args)
    require.NoError(t, err)

    // Verify struct field is updated
    assert.Equal(t, 25, opts.Resource.MaxDispatchConcurrent)

    // After sync, global should be updated
    opts.Resource.SyncToResourceGlobals()
    assert.Equal(t, 25, resourcekeeper.MaxDispatchConcurrent)
}

func TestCoreOptions_InvalidValues(t *testing.T) {
    tests := []struct {
        name        string
        args        []string
        expectError bool
        errorMsg    string
    }{
        {
            name: "invalid boolean value",
            args: []string{
                "--enable-leader-election=notabool",
            },
            expectError: true,
            errorMsg:    "invalid argument",
        },
        {
            name: "invalid duration value",
            args: []string{
                "--leader-election-lease-duration=notaduration",
            },
            expectError: true,
            errorMsg:    "invalid argument",
        },
        {
            name: "invalid int value",
            args: []string{
                "--webhook-port=notanint",
            },
            expectError: true,
            errorMsg:    "invalid argument",
        },
        {
            name: "invalid float value",
            args: []string{
                "--kube-api-qps=notafloat",
            },
            expectError: true,
            errorMsg:    "invalid argument",
        },
        {
            name: "invalid uint64 value",
            args: []string{
                "--log-file-max-size=-100",
            },
            expectError: true,
            errorMsg:    "invalid argument",
        },
        {
            name: "unknown flag",
            args: []string{
                "--unknown-flag=value",
            },
            expectError: true,
            errorMsg:    "unknown flag",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
            opt := NewCoreOptions()

            for _, f := range opt.Flags().FlagSets {
                fs.AddFlagSet(f)
            }

            err := fs.Parse(tt.args)
            if tt.expectError {
                assert.Error(t, err)
                assert.Contains(t, err.Error(), tt.errorMsg)
            } else {
                assert.NoError(t, err)
            }
        })
    }
}

func TestCoreOptions_PartialConfiguration(t *testing.T) {
    // Test that partial configuration works correctly
    // and doesn't override other defaults
    fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
    opt := NewCoreOptions()

    for _, f := range opt.Flags().FlagSets {
        fs.AddFlagSet(f)
    }

    // Only set a few flags
    args := []string{
        "--enable-leader-election=true",
        "--log-debug=true",
        "--perf-enabled=true",
    }

    err := fs.Parse(args)
    require.NoError(t, err)

    // Check that specified flags are updated
    assert.Equal(t, true, opt.Server.EnableLeaderElection)
    assert.Equal(t, true, opt.Observability.LogDebug)
    assert.Equal(t, true, opt.Performance.PerfEnabled)

    // Check that unspecified flags retain defaults
    assert.Equal(t, ":9440", opt.Server.HealthAddr)
    assert.Equal(t, "Local", opt.Server.StorageDriver)
    assert.Equal(t, false, opt.Webhook.UseWebhook)
    assert.Equal(t, ":8080", opt.Observability.MetricsAddr)
    assert.Equal(t, 10*time.Hour, opt.Kubernetes.InformerSyncPeriod)
    assert.Equal(t, 10, opt.Resource.MaxDispatchConcurrent)
}

func TestCoreOptions_FlagSetsOrganization(t *testing.T) {
    opt := NewCoreOptions()
    fss := opt.Flags()

    // Verify that all expected flag sets are created
    expectedFlagSets := []string{
        "server",
        "webhook",
        "observability",
        "kubernetes",
        "multicluster",
        "cue",
        "application",
        "oam",
        "performance",
        "admission",
        "resource",
        "workflow",
        "controller",
        "client",
        "reconcile",
        "sharding",
        "feature",
        "profiling",
        "klog",
    }

    for _, name := range expectedFlagSets {
        fs := fss.FlagSet(name)
        assert.NotNil(t, fs, "FlagSet %s should exist", name)
    }
}

func TestCoreOptions_FlagHelp(t *testing.T) {
    opt := NewCoreOptions()
    fss := opt.Flags()

    // Test that flags have proper help messages
    serverFS := fss.FlagSet("server")
    flag := serverFS.Lookup("enable-leader-election")
    assert.NotNil(t, flag)
    assert.Contains(t, flag.Usage, "Enable leader election")

    webhookFS := fss.FlagSet("webhook")
    flag = webhookFS.Lookup("use-webhook")
    assert.NotNil(t, flag)
    assert.Contains(t, flag.Usage, "Enable Admission Webhook")

    obsFS := fss.FlagSet("observability")
    flag = obsFS.Lookup("log-debug")
    assert.NotNil(t, flag)
    assert.Contains(t, flag.Usage, "Enable debug logs")
}

func TestCoreOptions_MultipleSyncCalls(t *testing.T) {
    // Store original values
    origCUEExternal := cuex.EnableExternalPackageForDefaultCompiler
    origCUEWatch := cuex.EnableExternalPackageWatchForDefaultCompiler
    origWait := wfTypes.MaxWorkflowWaitBackoffTime
    origDispatch := resourcekeeper.MaxDispatchConcurrent
    origOAMNamespace := oam.SystemDefinitionNamespace
    origAppPeriod := commonconfig.ApplicationReSyncPeriod
    origPerf := commonconfig.PerfEnabled

    // Restore after test
    defer func() {
        cuex.EnableExternalPackageForDefaultCompiler = origCUEExternal
        cuex.EnableExternalPackageWatchForDefaultCompiler = origCUEWatch
        wfTypes.MaxWorkflowWaitBackoffTime = origWait
        resourcekeeper.MaxDispatchConcurrent = origDispatch
        oam.SystemDefinitionNamespace = origOAMNamespace
        commonconfig.ApplicationReSyncPeriod = origAppPeriod
        commonconfig.PerfEnabled = origPerf
    }()

    // Test that calling sync multiple times doesn't cause issues
    opts := NewCoreOptions()

    // Set some values
    opts.CUE.EnableExternalPackage = true
    opts.CUE.EnableExternalPackageWatch = false
    opts.Workflow.MaxWaitBackoffTime = 100
    opts.Resource.MaxDispatchConcurrent = 20
    opts.OAM.SystemDefinitionNamespace = "test-system"
    opts.Application.ReSyncPeriod = 15 * time.Minute
    opts.Performance.PerfEnabled = true

    // Call sync multiple times
    opts.CUE.SyncToCUEGlobals()
    opts.CUE.SyncToCUEGlobals()

    opts.Workflow.SyncToWorkflowGlobals()
    opts.Workflow.SyncToWorkflowGlobals()

    opts.Resource.SyncToResourceGlobals()
    opts.Resource.SyncToResourceGlobals()

    opts.OAM.SyncToOAMGlobals()
    opts.OAM.SyncToOAMGlobals()

    opts.Application.SyncToApplicationGlobals()
    opts.Application.SyncToApplicationGlobals()

    opts.Performance.SyncToPerformanceGlobals()
    opts.Performance.SyncToPerformanceGlobals()

    // Verify values are still correct
    assert.True(t, cuex.EnableExternalPackageForDefaultCompiler)
    assert.False(t, cuex.EnableExternalPackageWatchForDefaultCompiler)
    assert.Equal(t, 100, wfTypes.MaxWorkflowWaitBackoffTime)
    assert.Equal(t, 20, resourcekeeper.MaxDispatchConcurrent)
    assert.Equal(t, "test-system", oam.SystemDefinitionNamespace)
    assert.Equal(t, 15*time.Minute, commonconfig.ApplicationReSyncPeriod)
    assert.True(t, commonconfig.PerfEnabled)
}

func TestCoreOptions_SpecialCharactersInStrings(t *testing.T) {
    fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
    opt := NewCoreOptions()

    for _, f := range opt.Flags().FlagSets {
        fs.AddFlagSet(f)
    }

    // Test with special characters and spaces in paths
    args := []string{
        `--webhook-cert-dir=/path/with spaces/and-special!@#$%chars`,
        `--log-file-path=/var/log/kubevela/日本語/логи.log`,
        `--health-addr=[::1]:8080`,
        `--metrics-addr=0.0.0.0:9090`,
    }

    err := fs.Parse(args)
    require.NoError(t, err)

    assert.Equal(t, `/path/with spaces/and-special!@#$%chars`, opt.Webhook.CertDir)
    assert.Equal(t, `/var/log/kubevela/日本語/логи.log`, opt.Observability.LogFilePath)
    assert.Equal(t, `[::1]:8080`, opt.Server.HealthAddr)
    assert.Equal(t, `0.0.0.0:9090`, opt.Observability.MetricsAddr)
}

func TestCoreOptions_ConcurrentAccess(t *testing.T) {
    // Test that the options can be accessed concurrently safely
    opt := NewCoreOptions()

    // Set some values
    opt.Server.EnableLeaderElection = true
    opt.Workflow.MaxWaitBackoffTime = 100
    opt.Resource.MaxDispatchConcurrent = 20

    // Simulate concurrent access
    done := make(chan bool, 3)

    go func() {
        for i := 0; i < 100; i++ {
            _ = opt.Server.EnableLeaderElection
        }
        done <- true
    }()

    go func() {
        for i := 0; i < 100; i++ {
            _ = opt.Workflow.MaxWaitBackoffTime
        }
        done <- true
    }()

    go func() {
        for i := 0; i < 100; i++ {
            _ = opt.Resource.MaxDispatchConcurrent
        }
        done <- true
    }()

    // Wait for all goroutines
    for i := 0; i < 3; i++ {
        <-done
    }
}

func TestCoreOptions_NilPointerSafety(t *testing.T) {
    // Ensure NewCoreOptions never returns nil pointers
    opt := NewCoreOptions()

    // All config modules should be non-nil
    assert.NotNil(t, opt.Server)
    assert.NotNil(t, opt.Webhook)
    assert.NotNil(t, opt.Observability)
    assert.NotNil(t, opt.Kubernetes)
    assert.NotNil(t, opt.MultiCluster)
    assert.NotNil(t, opt.CUE)
    assert.NotNil(t, opt.Application)
    assert.NotNil(t, opt.OAM)
    assert.NotNil(t, opt.Performance)
    assert.NotNil(t, opt.Workflow)
    assert.NotNil(t, opt.Admission)
    assert.NotNil(t, opt.Resource)
    assert.NotNil(t, opt.Client)
    assert.NotNil(t, opt.Reconcile)
    assert.NotNil(t, opt.Sharding)
    assert.NotNil(t, opt.Feature)
    assert.NotNil(t, opt.Profiling)
    assert.NotNil(t, opt.KLog)
    assert.NotNil(t, opt.Controller)
}

func TestCoreOptions_FlagPrecedence(t *testing.T) {
    // Test that later flags override earlier ones
    fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
    opt := NewCoreOptions()

    for _, f := range opt.Flags().FlagSets {
        fs.AddFlagSet(f)
    }

    // Parse with one value, then parse again with different value
    args1 := []string{"--webhook-port=8080"}
    err := fs.Parse(args1)
    require.NoError(t, err)
    assert.Equal(t, 8080, opt.Webhook.WebhookPort)

    // Reset and parse with different value
    fs = pflag.NewFlagSet("test", pflag.ContinueOnError)
    opt = NewCoreOptions()
    for _, f := range opt.Flags().FlagSets {
        fs.AddFlagSet(f)
    }

    args2 := []string{"--webhook-port=9090"}
    err = fs.Parse(args2)
    require.NoError(t, err)
    assert.Equal(t, 9090, opt.Webhook.WebhookPort)
}

func TestCoreOptions_AllConfigModulesHaveFlags(t *testing.T) {
    // Ensure every config module registers at least one flag
    opt := NewCoreOptions()
    fss := opt.Flags()

    configsWithExpectedFlags := map[string][]string{
        "server":        {"health-addr", "storage-driver", "enable-leader-election"},
        "webhook":       {"use-webhook", "webhook-cert-dir", "webhook-port"},
        "observability": {"metrics-addr", "log-debug", "log-file-path"},
        "kubernetes":    {"informer-sync-period", "kube-api-qps", "kube-api-burst"},
        "multicluster":  {"enable-cluster-gateway", "enable-cluster-metrics"},
        "cue":           {"enable-external-package-for-default-compiler"},
        "application":   {"application-re-sync-period"},
        "oam":           {"system-definition-namespace"},
        "controller":    {"revision-limit", "application-revision-limit", "definition-revision-limit"},
        "performance":   {"perf-enabled"},
        "workflow":      {"max-workflow-wait-backoff-time"},
        "resource":      {"max-dispatch-concurrent"},
    }

    for setName, expectedFlags := range configsWithExpectedFlags {
        fs := fss.FlagSet(setName)
        assert.NotNil(t, fs, "FlagSet %s should exist", setName)

        for _, flagName := range expectedFlags {
            flag := fs.Lookup(flagName)
            assert.NotNil(t, flag, "Flag %s should exist in flagset %s", flagName, setName)
        }
    }
}

func TestCoreOptions_CLIOverridesWork(t *testing.T) {
    // This test verifies that CLI flags correctly override default values
    // and that the sync methods properly propagate these values to globals

    // Store original globals to restore after test
    origWait := wfTypes.MaxWorkflowWaitBackoffTime
    origFailed := wfTypes.MaxWorkflowFailedBackoffTime
    origRetry := wfTypes.MaxWorkflowStepErrorRetryTimes
    origDispatch := resourcekeeper.MaxDispatchConcurrent
    origOAMNamespace := oam.SystemDefinitionNamespace
    origAppPeriod := commonconfig.ApplicationReSyncPeriod
    origPerf := commonconfig.PerfEnabled
    origCUEExternal := cuex.EnableExternalPackageForDefaultCompiler
    origCUEWatch := cuex.EnableExternalPackageWatchForDefaultCompiler

    defer func() {
        wfTypes.MaxWorkflowWaitBackoffTime = origWait
        wfTypes.MaxWorkflowFailedBackoffTime = origFailed
        wfTypes.MaxWorkflowStepErrorRetryTimes = origRetry
        resourcekeeper.MaxDispatchConcurrent = origDispatch
        oam.SystemDefinitionNamespace = origOAMNamespace
        commonconfig.ApplicationReSyncPeriod = origAppPeriod
        commonconfig.PerfEnabled = origPerf
        cuex.EnableExternalPackageForDefaultCompiler = origCUEExternal
        cuex.EnableExternalPackageWatchForDefaultCompiler = origCUEWatch
    }()

    opt := NewCoreOptions()
    fs := pflag.NewFlagSet("test", pflag.ContinueOnError)

    for _, f := range opt.Flags().FlagSets {
        fs.AddFlagSet(f)
    }

    // Verify defaults first
    assert.Equal(t, 60, opt.Workflow.MaxWaitBackoffTime, "Default should be 60")
    assert.Equal(t, 300, opt.Workflow.MaxFailedBackoffTime, "Default should be 300")
    assert.Equal(t, 10, opt.Workflow.MaxStepErrorRetryTimes, "Default should be 10")
    assert.Equal(t, 10, opt.Resource.MaxDispatchConcurrent, "Default should be 10")
    assert.Equal(t, "vela-system", opt.OAM.SystemDefinitionNamespace, "Default should be vela-system")
    assert.Equal(t, false, opt.Performance.PerfEnabled, "Default should be false")

    // Parse CLI args with overrides
    args := []string{
        "--max-workflow-wait-backoff-time=999",
        "--max-workflow-failed-backoff-time=888",
        "--max-workflow-step-error-retry-times=77",
        "--max-dispatch-concurrent=66",
        "--system-definition-namespace=custom-ns",
        "--application-re-sync-period=20m",
        "--perf-enabled=true",
        "--enable-external-package-for-default-compiler=true",
        "--enable-external-package-watch-for-default-compiler=true",
    }

    err := fs.Parse(args)
    require.NoError(t, err)

    // Verify struct fields got CLI values (not defaults)
    assert.Equal(t, 999, opt.Workflow.MaxWaitBackoffTime, "CLI override should be 999")
    assert.Equal(t, 888, opt.Workflow.MaxFailedBackoffTime, "CLI override should be 888")
    assert.Equal(t, 77, opt.Workflow.MaxStepErrorRetryTimes, "CLI override should be 77")
    assert.Equal(t, 66, opt.Resource.MaxDispatchConcurrent, "CLI override should be 66")
    assert.Equal(t, "custom-ns", opt.OAM.SystemDefinitionNamespace, "CLI override should be custom-ns")
    assert.Equal(t, 20*time.Minute, opt.Application.ReSyncPeriod, "CLI override should be 20m")
    assert.Equal(t, true, opt.Performance.PerfEnabled, "CLI override should be true")
    assert.Equal(t, true, opt.CUE.EnableExternalPackage, "CLI override should be true")
    assert.Equal(t, true, opt.CUE.EnableExternalPackageWatch, "CLI override should be true")

    // Now sync to globals
    opt.Workflow.SyncToWorkflowGlobals()
    opt.Resource.SyncToResourceGlobals()
    opt.OAM.SyncToOAMGlobals()
    opt.Application.SyncToApplicationGlobals()
    opt.Performance.SyncToPerformanceGlobals()
    opt.CUE.SyncToCUEGlobals()

    // Verify globals got the CLI values
    assert.Equal(t, 999, wfTypes.MaxWorkflowWaitBackoffTime, "Global should have CLI value")
    assert.Equal(t, 888, wfTypes.MaxWorkflowFailedBackoffTime, "Global should have CLI value")
    assert.Equal(t, 77, wfTypes.MaxWorkflowStepErrorRetryTimes, "Global should have CLI value")
    assert.Equal(t, 66, resourcekeeper.MaxDispatchConcurrent, "Global should have CLI value")
    assert.Equal(t, "custom-ns", oam.SystemDefinitionNamespace, "Global should have CLI value")
    assert.Equal(t, 20*time.Minute, commonconfig.ApplicationReSyncPeriod, "Global should have CLI value")
    assert.Equal(t, true, commonconfig.PerfEnabled, "Global should have CLI value")
    assert.Equal(t, true, cuex.EnableExternalPackageForDefaultCompiler, "Global should have CLI value")
    assert.Equal(t, true, cuex.EnableExternalPackageWatchForDefaultCompiler, "Global should have CLI value")
}

func TestCoreOptions_CompleteIntegration(t *testing.T) {
    // A comprehensive integration test
    opt := NewCoreOptions()
    fs := pflag.NewFlagSet("test", pflag.ContinueOnError)

    for _, f := range opt.Flags().FlagSets {
        fs.AddFlagSet(f)
    }

    // Simulate a real-world configuration
    args := []string{
        // Production-like settings
        "--enable-leader-election=true",
        "--leader-election-namespace=vela-system",
        "--use-webhook=true",
        "--webhook-port=9443",
        "--metrics-addr=:8080",
        "--health-addr=:9440",
        "--log-debug=false",
        "--log-file-path=/var/log/vela/core.log",
        "--log-file-max-size=100",
        "--kube-api-qps=100",
        "--kube-api-burst=200",
        "--enable-cluster-gateway=true",
        "--enable-cluster-metrics=true",
        "--cluster-metrics-interval=30s",
        "--application-re-sync-period=10m",
        "--perf-enabled=true",
        "--max-dispatch-concurrent=20",
        "--max-workflow-wait-backoff-time=120",
        "--max-workflow-failed-backoff-time=600",
    }

    err := fs.Parse(args)
    require.NoError(t, err)

    // Verify the configuration is production-ready
    assert.True(t, opt.Server.EnableLeaderElection, "Leader election should be enabled in production")
    assert.Equal(t, "vela-system", opt.Server.LeaderElectionNamespace)
    assert.True(t, opt.Webhook.UseWebhook, "Webhook should be enabled in production")
    assert.Equal(t, 9443, opt.Webhook.WebhookPort)
    assert.False(t, opt.Observability.LogDebug, "Debug logging should be disabled in production")
    assert.NotEmpty(t, opt.Observability.LogFilePath, "Log file path should be set in production")

    // Verify performance settings
    assert.True(t, opt.Performance.PerfEnabled)
    assert.Equal(t, 20, opt.Resource.MaxDispatchConcurrent)

    // Verify cluster settings
    assert.True(t, opt.MultiCluster.EnableClusterGateway)
    assert.True(t, opt.MultiCluster.EnableClusterMetrics)
    assert.Equal(t, 30*time.Second, opt.MultiCluster.ClusterMetricsInterval)

    // Sync all configurations that need it
    opt.CUE.SyncToCUEGlobals()
    opt.Workflow.SyncToWorkflowGlobals()
    opt.Resource.SyncToResourceGlobals()
    opt.OAM.SyncToOAMGlobals()
    opt.Application.SyncToApplicationGlobals()
    opt.Performance.SyncToPerformanceGlobals()

    // Verify sync worked
    assert.Equal(t, 20, resourcekeeper.MaxDispatchConcurrent)
    assert.Equal(t, 120, wfTypes.MaxWorkflowWaitBackoffTime)
    assert.Equal(t, 600, wfTypes.MaxWorkflowFailedBackoffTime)
    assert.Equal(t, "vela-system", oam.SystemDefinitionNamespace)
    assert.Equal(t, 10*time.Minute, commonconfig.ApplicationReSyncPeriod)
    assert.True(t, commonconfig.PerfEnabled)
}

@ -18,6 +18,7 @@ package app

import (
    "context"
    "flag"
    "fmt"
    "io"
    "os"

@ -98,14 +99,32 @@ func NewCoreCommand() *cobra.Command {
}

func run(ctx context.Context, s *options.CoreOptions) error {
    // Sync parsed config values to external package global variables
    s.Workflow.SyncToWorkflowGlobals()
    s.CUE.SyncToCUEGlobals()
    s.Application.SyncToApplicationGlobals()
    s.Performance.SyncToPerformanceGlobals()
    s.Resource.SyncToResourceGlobals()
    s.OAM.SyncToOAMGlobals()

    restConfig := ctrl.GetConfigOrDie()
    restConfig.UserAgent = types.KubeVelaName + "/" + version.GitRevision
    restConfig.QPS = float32(s.QPS)
    restConfig.Burst = s.Burst
    restConfig.QPS = float32(s.Kubernetes.QPS)
    restConfig.Burst = s.Kubernetes.Burst
    restConfig.Wrap(auth.NewImpersonatingRoundTripper)

    // Configure klog based on parsed observability settings
    if s.Observability.LogDebug {
        _ = flag.Set("v", strconv.Itoa(int(commonconfig.LogDebug)))
    }
    if s.Observability.LogFilePath != "" {
        _ = flag.Set("logtostderr", "false")
        _ = flag.Set("log_file", s.Observability.LogFilePath)
        _ = flag.Set("log_file_max_size", strconv.FormatUint(s.Observability.LogFileMaxSize, 10))
    }

    // Set logger (use --dev-logs=true for local development)
    if s.DevLogs {
    if s.Observability.DevLogs {
        logOutput := newColorWriter(os.Stdout)
        klog.LogToStderr(false)
        klog.SetOutput(logOutput)

@ -122,15 +141,15 @@ func run(ctx context.Context, s *options.CoreOptions) error {
    go profiling.StartProfilingServer(nil)

    // wrapper the round tripper by multi cluster rewriter
    if s.EnableClusterGateway {
    if s.MultiCluster.EnableClusterGateway {
        client, err := multicluster.Initialize(restConfig, true)
        if err != nil {
            klog.ErrorS(err, "failed to enable multi-cluster capability")
            return err
        }

        if s.EnableClusterMetrics {
            _, err := multicluster.NewClusterMetricsMgr(context.Background(), client, s.ClusterMetricsInterval)
        if s.MultiCluster.EnableClusterMetrics {
            _, err := multicluster.NewClusterMetricsMgr(context.Background(), client, s.MultiCluster.ClusterMetricsInterval)
            if err != nil {
                klog.ErrorS(err, "failed to enable multi-cluster-metrics capability")
                return err

@ -139,32 +158,32 @@ func run(ctx context.Context, s *options.CoreOptions) error {
    }

    if utilfeature.DefaultMutableFeatureGate.Enabled(features.ApplyOnce) {
        commonconfig.ApplicationReSyncPeriod = s.InformerSyncPeriod
        commonconfig.ApplicationReSyncPeriod = s.Kubernetes.InformerSyncPeriod
    }

    leaderElectionID := util.GenerateLeaderElectionID(types.KubeVelaName, s.ControllerArgs.IgnoreAppWithoutControllerRequirement)
    leaderElectionID := util.GenerateLeaderElectionID(types.KubeVelaName, s.Controller.IgnoreAppWithoutControllerRequirement)
    leaderElectionID += sharding.GetShardIDSuffix()
    mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
        Scheme: scheme,
        Metrics: metricsserver.Options{
            BindAddress: s.MetricsAddr,
            BindAddress: s.Observability.MetricsAddr,
        },
        LeaderElection:          s.EnableLeaderElection,
        LeaderElectionNamespace: s.LeaderElectionNamespace,
        LeaderElection:          s.Server.EnableLeaderElection,
        LeaderElectionNamespace: s.Server.LeaderElectionNamespace,
        LeaderElectionID:        leaderElectionID,
        WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{
            Port:    s.WebhookPort,
            CertDir: s.CertDir,
            Port:    s.Webhook.WebhookPort,
            CertDir: s.Webhook.CertDir,
        }),
        HealthProbeBindAddress: s.HealthAddr,
        LeaseDuration:          &s.LeaseDuration,
        RenewDeadline:          &s.RenewDeadLine,
        RetryPeriod:            &s.RetryPeriod,
        HealthProbeBindAddress: s.Server.HealthAddr,
        LeaseDuration:          &s.Server.LeaseDuration,
        RenewDeadline:          &s.Server.RenewDeadline,
        RetryPeriod:            &s.Server.RetryPeriod,
        NewClient:              velaclient.DefaultNewControllerClient,
        NewCache: cache.BuildCache(ctx,
            ctrlcache.Options{
                Scheme:     scheme,
                SyncPeriod: &s.InformerSyncPeriod,
                SyncPeriod: &s.Kubernetes.InformerSyncPeriod,
                // SyncPeriod is configured with default value, aka. 10h. First, controller-runtime does not
                // recommend use it as a time trigger, instead, it is expected to work for failure tolerance
                // of controller-runtime. Additionally, set this value will affect not only application

@ -210,7 +229,7 @@ func run(ctx context.Context, s *options.CoreOptions) error {
        klog.ErrorS(err, "Failed to run manager")
        return err
    }
    if s.LogFilePath != "" {
    if s.Observability.LogFilePath != "" {
        klog.Flush()
    }
    klog.Info("Safely stops Program...")

@ -228,7 +247,7 @@ func prepareRunInShardingMode(ctx context.Context, mgr manager.Manager, s *optio
        }
    } else {
        klog.Infof("controller running in sharding mode, current shard id: %s", sharding.ShardID)
        if err := application.Setup(mgr, *s.ControllerArgs); err != nil {
        if err := application.Setup(mgr, s.Controller.Args); err != nil {
            return err
        }
    }

@ -237,16 +256,16 @@ func prepareRunInShardingMode(ctx context.Context, mgr manager.Manager, s *optio
}

func prepareRun(ctx context.Context, mgr manager.Manager, s *options.CoreOptions) error {
    if s.UseWebhook {
        klog.InfoS("Enable webhook", "server port", strconv.Itoa(s.WebhookPort))
        oamwebhook.Register(mgr, *s.ControllerArgs)
        if err := waitWebhookSecretVolume(s.CertDir, waitSecretTimeout, waitSecretInterval); err != nil {
    if s.Webhook.UseWebhook {
        klog.InfoS("Enable webhook", "server port", strconv.Itoa(s.Webhook.WebhookPort))
        oamwebhook.Register(mgr, s.Controller.Args)
        if err := waitWebhookSecretVolume(s.Webhook.CertDir, waitSecretTimeout, waitSecretInterval); err != nil {
            klog.ErrorS(err, "Unable to get webhook secret")
            return err
        }
    }

    if err := oamv1beta1.Setup(mgr, *s.ControllerArgs); err != nil {
    if err := oamv1beta1.Setup(mgr, s.Controller.Args); err != nil {
        klog.ErrorS(err, "Unable to setup the oam controller")
        return err
    }

40
go.mod
40
go.mod
|
|
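The option fields referenced above (s.Server.*, s.Webhook.*, s.Observability.*, s.Kubernetes.*, s.Controller.*) indicate that the previously flat CoreOptions fields were regrouped into nested option structs. A simplified, hypothetical sketch of that layout, limited to the fields visible in this diff (the struct names are illustrative, not the real cmd/core/app/options package):

```go
// Hypothetical sketch: field grouping inferred from the diff above; the real
// CoreOptions type contains many more fields and different type names.
package options

import "time"

type CoreOptions struct {
	Server        ServerOptions
	Webhook       WebhookOptions
	Observability ObservabilityOptions
	Kubernetes    KubernetesOptions
	Controller    ControllerOptions
}

type ServerOptions struct {
	EnableLeaderElection    bool
	LeaderElectionNamespace string
	HealthAddr              string
	LeaseDuration           time.Duration
	RenewDeadline           time.Duration
	RetryPeriod             time.Duration
}

type WebhookOptions struct {
	UseWebhook  bool
	WebhookPort int
	CertDir     string
}

type ObservabilityOptions struct {
	MetricsAddr string
	LogFilePath string
}

type KubernetesOptions struct {
	InformerSyncPeriod time.Duration
}

type ControllerOptions struct {
	// Args would hold the controller arguments passed to application.Setup above.
	IgnoreAppWithoutControllerRequirement bool
}
```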
go.mod (40 lines changed)

@@ -3,7 +3,7 @@ module github.com/oam-dev/kubevela
go 1.23.8

require (
cuelang.org/go v0.9.2
cuelang.org/go v0.14.1
github.com/AlecAivazis/survey/v2 v2.1.1
github.com/FogDong/uitable v0.0.5
github.com/Masterminds/semver v1.5.0

@@ -37,8 +37,8 @@ require (
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174
github.com/imdario/mergo v0.3.16
github.com/jeremywohl/flatten/v2 v2.0.0-20211013061545-07e4a09fb8e4
github.com/kubevela/pkg v1.9.3-0.20250625225831-a2894a62a307
github.com/kubevela/workflow v0.6.3-0.20250717221743-56b80cee4121
github.com/kubevela/pkg v1.9.3-0.20251007211343-a91fd1f290c6
github.com/kubevela/workflow v0.6.3-0.20251007211423-415593c3cee0
github.com/kyokomi/emoji v2.2.4+incompatible
github.com/magiconair/properties v1.8.7
github.com/mattn/go-runewidth v0.0.15

@@ -59,21 +59,21 @@ require (
github.com/prometheus/client_model v0.6.1
github.com/rivo/tview v0.0.0-20221128165837-db36428c92d9
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.7
github.com/stretchr/testify v1.10.0
github.com/tidwall/gjson v1.14.4
github.com/wercker/stern v0.0.0-20190705090245-4fa46dd6987f
github.com/xlab/treeprint v1.2.0
gitlab.com/gitlab-org/api/client-go v0.127.0
go.uber.org/multierr v1.11.0
golang.org/x/crypto v0.37.0
golang.org/x/mod v0.24.0
golang.org/x/oauth2 v0.29.0
golang.org/x/sync v0.13.0
golang.org/x/term v0.31.0
golang.org/x/text v0.24.0
golang.org/x/tools v0.31.0
golang.org/x/crypto v0.40.0
golang.org/x/mod v0.26.0
golang.org/x/oauth2 v0.30.0
golang.org/x/sync v0.16.0
golang.org/x/term v0.33.0
golang.org/x/text v0.27.0
golang.org/x/tools v0.35.0
gomodules.xyz/jsonpatch/v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.14.4

@@ -99,7 +99,7 @@ require (
)

require (
cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 // indirect
cuelabs.dev/go/oci/ociregistry v0.0.0-20250715075730-49cab49c8e9d // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect

@@ -134,7 +134,7 @@ require (
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/creack/pty v1.1.18 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/distribution/reference v0.6.0 // indirect

@@ -145,7 +145,7 @@ require (
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
github.com/emicklei/proto v1.10.0 // indirect
github.com/emicklei/proto v1.14.2 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/evanphx/json-patch v5.7.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect

@@ -232,15 +232,16 @@ require (
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/openshift/library-go v0.0.0-20230327085348-8477ec72b725 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/perimeterx/marshmallow v1.1.5 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pjbgf/sha1cd v0.3.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/protocolbuffers/txtpbfmt v0.0.0-20230328191034-3462fbc510c0 // indirect
github.com/protocolbuffers/txtpbfmt v0.0.0-20250627152318-f293424e46b5 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect

@@ -276,9 +277,10 @@ require (
go.uber.org/automaxprocs v1.5.3 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/time v0.10.0 // indirect
golang.org/x/tools/go/expect v0.1.0-deprecated // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
go.sum (82 lines changed): checksum entries regenerated to match the module version bumps recorded in go.mod above (cuelang.org/go, cuelabs.dev/go/oci/ociregistry, github.com/kubevela/pkg, github.com/kubevela/workflow, github.com/spf13/cobra, github.com/spf13/pflag, github.com/cpuguy83/go-md2man/v2, github.com/emicklei/proto, github.com/opencontainers/image-spec, github.com/protocolbuffers/txtpbfmt, the golang.org/x/* modules, and related tooling entries).
@@ -153,9 +153,8 @@ func main() {
lines[idx] = ""
continue
}
if strings.Contains(line, ".md") && strings.Contains(line, "](") {
lines[idx] = strings.Replace(line, ".md", "", -1)
}
// Don't remove .md extensions - Docusaurus handles them properly
// and keeping them provides better IDE support and compatibility
}

newlines = append(newlines, lines...)
@@ -42,12 +42,12 @@ var ComponentDefDirs = []string{"./vela-templates/definitions/internal/component

// CustomComponentHeaderEN .
var CustomComponentHeaderEN = `---
title: Built-in ParsedComponents Type
title: Built-in Components Type
---

This documentation will walk through all the built-in component types sorted alphabetically.

` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc.md), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))

// CustomComponentHeaderZH .
var CustomComponentHeaderZH = `---

@@ -56,7 +56,7 @@ title: 内置组件列表

本文档将**按字典序**展示所有内置组件的参数列表。

` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc.md)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))

// ComponentDef generate component def reference doc
func ComponentDef(ctx context.Context, c common.Args, opt Options) {
@@ -47,7 +47,7 @@ title: Built-in Policy Type

This documentation will walk through all the built-in policy types sorted alphabetically.

` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc.md), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))

// CustomPolicyHeaderZH .
var CustomPolicyHeaderZH = `---

@@ -56,7 +56,7 @@ title: 内置策略列表

本文档将**按字典序**展示所有内置策略的参数列表。

` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc.md)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))

// PolicyDef generate policy def reference doc
func PolicyDef(ctx context.Context, c common.Args, opt Options) {
@@ -47,7 +47,7 @@ title: Built-in Trait Type

This documentation will walk through all the built-in trait types sorted alphabetically.

` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc.md), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))

// CustomTraitHeaderZH .
var CustomTraitHeaderZH = `---

@@ -56,7 +56,7 @@ title: 内置运维特征列表

本文档将**按字典序**展示所有内置运维特征的参数列表。

` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc.md)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))

// TraitDef generate trait def reference doc
func TraitDef(ctx context.Context, c common.Args, opt Options) {
@@ -47,7 +47,7 @@ title: Built-in WorkflowStep Type

This documentation will walk through all the built-in workflow step types sorted alphabetically.

` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc.md), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))

// CustomWorkflowHeaderZH .
var CustomWorkflowHeaderZH = `---

@@ -56,7 +56,7 @@ title: 内置工作流步骤列表

本文档将**按字典序**展示所有内置工作流步骤的参数列表。

` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc.md)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))

// WorkflowDef generate workflow def reference doc
func WorkflowDef(ctx context.Context, c common.Args, opt Options) {
@@ -50,7 +50,7 @@ else
GOIMPORTS=$(shell which goimports)
endif

CUE_VERSION ?= v0.9.2
CUE_VERSION ?= v0.14.1
.PHONY: installcue
installcue:
ifeq (, $(shell which cue))
@@ -152,7 +152,7 @@ func parseHeaders(obj cue.Value, label string) (http.Header, error) {
if err != nil {
return nil, err
}
h.Add(iter.Label(), str)
h.Add(iter.Selector().Unquoted(), str)
}
return h, nil
}
@@ -16,10 +16,6 @@ limitations under the License.

package core_oam_dev

import (
"github.com/spf13/pflag"
)

// Args args used by controller
type Args struct {

@@ -47,17 +43,3 @@ type Args struct {
// IgnoreDefinitionWithoutControllerRequirement indicates that trait/component/workflowstep definition controller will not process the definition without 'definition.oam.dev/controller-version-require' annotation.
IgnoreDefinitionWithoutControllerRequirement bool
}

// AddFlags adds flags to the specified FlagSet
func (a *Args) AddFlags(fs *pflag.FlagSet, c *Args) {
fs.IntVar(&a.RevisionLimit, "revision-limit", c.RevisionLimit,
"RevisionLimit is the maximum number of revisions that will be maintained. The default value is 50.")
fs.IntVar(&a.AppRevisionLimit, "application-revision-limit", c.AppRevisionLimit,
"application-revision-limit is the maximum number of application useless revisions that will be maintained, if the useless revisions exceed this number, older ones will be GCed first.The default value is 10.")
fs.IntVar(&a.DefRevisionLimit, "definition-revision-limit", c.DefRevisionLimit,
"definition-revision-limit is the maximum number of component/trait definition useless revisions that will be maintained, if the useless revisions exceed this number, older ones will be GCed first.The default value is 20.")
fs.BoolVar(&a.AutoGenWorkloadDefinition, "autogen-workload-definition", c.AutoGenWorkloadDefinition, "Automatic generated workloadDefinition which componentDefinition refers to.")
fs.IntVar(&a.ConcurrentReconciles, "concurrent-reconciles", c.ConcurrentReconciles, "concurrent-reconciles is the concurrent reconcile number of the controller. The default value is 4")
fs.BoolVar(&a.IgnoreAppWithoutControllerRequirement, "ignore-app-without-controller-version", c.IgnoreAppWithoutControllerRequirement, "If true, application controller will not process the app without 'app.oam.dev/controller-version-require' annotation")
fs.BoolVar(&a.IgnoreDefinitionWithoutControllerRequirement, "ignore-definition-without-controller-version", c.IgnoreDefinitionWithoutControllerRequirement, "If true, trait/component/workflowstep definition controller will not process the definition without 'definition.oam.dev/controller-version-require' annotation")
}
@@ -1477,7 +1477,7 @@ var _ = Describe("Test Application Controller", func() {
Outputs: workflowv1alpha1.StepOutputs{
{
Name: "output",
ValueFrom: "context.name",
ValueFrom: `"app-with-skip-output"`,
},
},
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb1"}`)},

@@ -1492,7 +1492,7 @@ var _ = Describe("Test Application Controller", func() {
ParameterKey: "",
},
},
If: `inputs.output == "app-with-timeout-output"`,
If: `inputs.output == "app-with-skip-output"`,
Type: "apply-component",
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb2"}`)},
},
@@ -8,6 +8,8 @@ spec:
schematic:
cue:
template: |
import "list"

pvcVolumesList: *[
for v in parameter.pvc if v.mountPath != _|_ {
{

@@ -24,7 +26,7 @@ spec:
}
},
] | []
volumesList: pvcVolumesList + configMapVolumesList
volumesList: list.Concat([pvcVolumesList, configMapVolumesList])
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
@@ -47,8 +47,9 @@ func GetParameters(templateStr string) ([]types.Parameter, error) {
if iter.Selector().IsDefinition() {
continue
}
name := GetSelectorLabel(iter.Selector())
var param = types.Parameter{
Name: iter.Label(),
Name: name,
Required: !iter.IsOptional(),
}
val := iter.Value()
@@ -78,4 +78,25 @@ func TestGetParameter(t *testing.T) {
}
}
assert.Equal(t, flag, true)

// Test pattern parameter selectors which would cause panic with Unquoted()
data, _ = os.ReadFile("testdata/workloads/pattern-params.cue")
params, err = GetParameters(string(data))
assert.NoError(t, err) // Should not panic
// We should get the regular parameters but pattern selectors are handled safely
assert.GreaterOrEqual(t, len(params), 2) // At least name and port
foundName := false
foundPort := false
for _, p := range params {
if p.Name == "name" {
foundName = true
assert.Equal(t, cue.StringKind, p.Type)
}
if p.Name == "port" {
foundPort = true
assert.Equal(t, int64(8080), p.Default)
}
}
assert.True(t, foundName, "Should find 'name' parameter")
assert.True(t, foundPort, "Should find 'port' parameter")
}
@@ -27,6 +27,8 @@ import (
"github.com/kubevela/workflow/pkg/cue/model/value"
"github.com/pkg/errors"
"k8s.io/klog/v2"

velacue "github.com/oam-dev/kubevela/pkg/cue"
)

const (

@@ -143,7 +145,8 @@ func getStatusMap(templateContext map[string]interface{}, statusFields string, p
return templateContext, nil, errors.WithMessage(err, "get context fields")
}
for iter.Next() {
contextLabels = append(contextLabels, iter.Label())
label := velacue.GetSelectorLabel(iter.Selector())
contextLabels = append(contextLabels, label)
}

cueBuffer := runtimeContextBuff + "\n" + statusFields

@@ -160,7 +163,7 @@ func getStatusMap(templateContext map[string]interface{}, statusFields string, p

outer:
for iter.Next() {
label := iter.Label()
label := velacue.GetSelectorLabel(iter.Selector())

if len(label) >= 32 {
klog.Warningf("status.details field label %s is too long, skipping", label)
@ -476,11 +476,11 @@ func TestContextPassing(t *testing.T) {
|
|||
statusCtx := ctx["status"].(map[string]interface{})
|
||||
details := statusCtx["details"].(map[string]interface{})
|
||||
|
||||
assert.Equal(t, 3, details["replicas"])
|
||||
assert.Equal(t, 8080, details["port"])
|
||||
assert.Equal(t, int64(3), details["replicas"])
|
||||
assert.Equal(t, int64(8080), details["port"])
|
||||
assert.Equal(t, true, details["isReady"])
|
||||
assert.Equal(t, true, details["configEnabled"])
|
||||
assert.Equal(t, 30, details["configTimeout"])
|
||||
assert.Equal(t, int64(30), details["configTimeout"])
|
||||
|
||||
assert.Nil(t, details["config"])
|
||||
},
|
||||
|
|
@ -518,9 +518,9 @@ func TestContextPassing(t *testing.T) {
|
|||
|
||||
ports := details["$ports"].([]interface{})
|
||||
assert.Len(t, ports, 3)
|
||||
assert.Equal(t, 80, ports[0])
|
||||
assert.Equal(t, 443, ports[1])
|
||||
assert.Equal(t, 8080, ports[2])
|
||||
assert.Equal(t, int64(80), ports[0])
|
||||
assert.Equal(t, int64(443), ports[1])
|
||||
assert.Equal(t, int64(8080), ports[2])
|
||||
|
||||
protocols := details["$protocols"].([]interface{})
|
||||
assert.Len(t, protocols, 3)
|
||||
|
|
@ -530,8 +530,8 @@ func TestContextPassing(t *testing.T) {
|
|||
mappings := details["$mappings"].([]interface{})
|
||||
assert.Len(t, mappings, 2)
|
||||
|
||||
assert.Equal(t, 3, details["portCount"])
|
||||
assert.Equal(t, 80, details["firstPort"])
|
||||
assert.Equal(t, int64(3), details["portCount"])
|
||||
assert.Equal(t, int64(80), details["firstPort"])
|
||||
assert.Equal(t, "http", details["mainProtocol"])
|
||||
assert.Equal(t, "80,443,8080", details["portsString"])
|
||||
},
|
||||
|
|
@ -631,9 +631,9 @@ func TestContextPassing(t *testing.T) {
|
|||
statusCtx := ctx["status"].(map[string]interface{})
|
||||
details := statusCtx["details"].(map[string]interface{})
|
||||
|
||||
assert.Equal(t, 2, details["$multiplier"])
|
||||
assert.Equal(t, 5, details["$offset"])
|
||||
assert.Equal(t, 25, details["result"])
|
||||
assert.Equal(t, int64(2), details["$multiplier"])
|
||||
assert.Equal(t, int64(5), details["$offset"])
|
||||
assert.Equal(t, int64(25), details["result"])
|
||||
assert.Equal(t, "Result is 25", details["displayText"])
|
||||
},
|
||||
},
|
||||
|
|
@ -670,8 +670,8 @@ func TestContextPassing(t *testing.T) {
|
|||
statusCtx := ctx["status"].(map[string]interface{})
|
||||
assert.Equal(t, false, statusCtx["healthy"])
|
||||
details := statusCtx["details"].(map[string]interface{})
|
||||
assert.Equal(t, 5, details["replicas"])
|
||||
assert.Equal(t, 3, details["readyReplicas"])
|
||||
assert.Equal(t, int64(5), details["replicas"])
|
||||
assert.Equal(t, int64(3), details["readyReplicas"])
|
||||
},
|
||||
},
|
||||
"message-references-health-and-details": {
|
||||
|
|
@ -781,3 +781,185 @@ func TestContextPassing(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetStatusWithDefinitionAndHiddenLabels(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
templateContext map[string]interface{}
|
||||
statusFields string
|
||||
wantNoErr bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "handles definition labels without panic",
|
||||
templateContext: map[string]interface{}{
|
||||
"output": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
statusFields: `
|
||||
#SomeDefinition: {
|
||||
name: string
|
||||
type: string
|
||||
}
|
||||
|
||||
status: #SomeDefinition & {
|
||||
name: "test"
|
||||
type: "healthy"
|
||||
}
|
||||
`,
|
||||
wantNoErr: true,
|
||||
description: "Should handle definition labels (#SomeDefinition) without panicking",
|
||||
},
|
||||
{
|
||||
name: "handles hidden labels without panic",
|
||||
templateContext: map[string]interface{}{
|
||||
"output": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
statusFields: `
|
||||
_hiddenField: "internal"
|
||||
|
||||
status: {
|
||||
name: "test"
|
||||
internal: _hiddenField
|
||||
}
|
||||
`,
|
||||
wantNoErr: true,
|
||||
description: "Should handle hidden labels (_hiddenField) without panicking",
|
||||
},
|
||||
{
|
||||
name: "handles pattern labels without panic",
|
||||
templateContext: map[string]interface{}{
|
||||
"output": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
statusFields: `
|
||||
[string]: _
|
||||
|
||||
status: {
|
||||
name: "test"
|
||||
healthy: true
|
||||
}
|
||||
`,
|
||||
wantNoErr: true,
|
||||
description: "Should handle pattern labels ([string]: _) without panicking",
|
||||
},
|
||||
{
|
||||
name: "handles mixed label types without panic",
|
||||
templateContext: map[string]interface{}{
|
||||
"output": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
statusFields: `
|
||||
#Definition: {
|
||||
field: string
|
||||
}
|
||||
|
||||
_hidden: "value"
|
||||
|
||||
normalField: "visible"
|
||||
|
||||
status: {
|
||||
name: normalField
|
||||
type: _hidden
|
||||
def: #Definition & {field: "test"}
|
||||
}
|
||||
`,
|
||||
wantNoErr: true,
|
||||
description: "Should handle mixed label types without panicking",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
request := &StatusRequest{
|
||||
Details: tc.statusFields,
|
||||
Parameter: map[string]interface{}{},
|
||||
}
|
||||
|
||||
// This should not panic even with definition or hidden labels
|
||||
result, err := GetStatus(tc.templateContext, request)
|
||||
|
||||
if tc.wantNoErr {
|
||||
// We expect no panic and a valid result
|
||||
assert.NotNil(t, result, tc.description)
|
||||
// The function may return an error for invalid CUE, but it shouldn't panic
|
||||
if err != nil {
|
||||
t.Logf("Got expected error (non-panic): %v", err)
|
||||
}
|
||||
} else {
|
||||
assert.Error(t, err, tc.description)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetStatusMapWithComplexSelectors(t *testing.T) {
|
||||
// Test that getStatusMap doesn't panic with various selector types
|
||||
testCases := []struct {
|
||||
name string
|
||||
statusFields string
|
||||
templateContext map[string]interface{}
|
||||
shouldNotPanic bool
|
||||
}{
|
||||
{
|
||||
name: "definition selector in context",
|
||||
statusFields: `
|
||||
#Config: {
|
||||
enabled: bool
|
||||
}
|
||||
|
||||
config: #Config & {
|
||||
enabled: true
|
||||
}
|
||||
`,
|
||||
templateContext: map[string]interface{}{},
|
||||
shouldNotPanic: true,
|
||||
},
|
||||
{
|
||||
name: "hidden field selector",
|
||||
statusFields: `
|
||||
_internal: {
|
||||
secret: "hidden"
|
||||
}
|
||||
|
||||
public: _internal.secret
|
||||
`,
|
||||
templateContext: map[string]interface{}{},
|
||||
shouldNotPanic: true,
|
||||
},
|
||||
{
|
||||
name: "optional field selector",
|
||||
statusFields: `
|
||||
optional?: string
|
||||
|
||||
required: string | *"default"
|
||||
`,
|
||||
templateContext: map[string]interface{}{},
|
||||
shouldNotPanic: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
if tc.shouldNotPanic {
|
||||
// The function should not panic
|
||||
assert.NotPanics(t, func() {
|
||||
_, _, _ = getStatusMap(tc.templateContext, tc.statusFields, nil)
|
||||
}, "getStatusMap should not panic with %s", tc.name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -38,6 +38,7 @@ import (
"github.com/kubevela/workflow/pkg/cue/model/value"
"github.com/kubevela/workflow/pkg/cue/process"

velacue "github.com/oam-dev/kubevela/pkg/cue"
velaprocess "github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/cue/task"
"github.com/oam-dev/kubevela/pkg/oam"

@@ -139,7 +140,7 @@ func (wd *workloadDef) Complete(ctx process.Context, abstractTemplate string, pa
continue
}
other, err := model.NewOther(iter.Value())
name := iter.Label()
name := velacue.GetSelectorLabel(iter.Selector())
if err != nil {
return errors.WithMessagef(err, "invalid outputs(%s) of workload %s", name, wd.name)
}

@@ -272,7 +273,7 @@ func (td *traitDef) Complete(ctx process.Context, abstractTemplate string, param
continue
}
other, err := model.NewOther(iter.Value())
name := iter.Label()
name := velacue.GetSelectorLabel(iter.Selector())
if err != nil {
return errors.WithMessagef(err, "invalid outputs(resource=%s) of trait %s", name, td.name)
}
@@ -0,0 +1,16 @@
// Test template with pattern parameter selectors
parameter: {
// Regular string parameter
name: string

// Pattern parameter selector - this would cause Unquoted() to panic
[string]: _

// Another regular parameter
port: *8080 | int
}

output: {
apiVersion: "apps/v1"
kind: "Deployment"
}
@@ -0,0 +1,33 @@
/*
Copyright 2021 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cue

import "cuelang.org/go/cue"

// GetSelectorLabel safely extracts a label from a CUE selector.
// It uses String() by default to avoid panics on pattern parameter selectors,
// and only uses Unquoted() when it's safe (i.e., for StringLabel with concrete names).
// This prevents panics that would occur when calling Unquoted() on pattern constraints like [string]: T.
func GetSelectorLabel(selector cue.Selector) string {
// Use String() as a safe default
label := selector.String()
// If it's a quoted string, unquote it safely
if selector.IsString() && selector.LabelType() == cue.StringLabel {
label = selector.Unquoted()
}
return label
}
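A minimal, hypothetical usage sketch of the helper above. It inlines the same selector logic so it can run standalone; the template string and field names are invented. The point it illustrates: Unquoted() is only valid for concrete string labels, so definition and hidden labels need the String() fallback.

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue"
	"cuelang.org/go/cue/cuecontext"
)

// safeLabel mirrors GetSelectorLabel: fall back to String() for anything that
// is not a concrete string label (definitions, hidden fields, patterns).
func safeLabel(sel cue.Selector) string {
	if sel.IsString() && sel.LabelType() == cue.StringLabel {
		return sel.Unquoted()
	}
	return sel.String()
}

func main() {
	v := cuecontext.New().CompileString(`
		#Config: {enabled: bool}
		_hidden: "internal"
		name:    "demo"
	`)
	iter, err := v.Fields(cue.Definitions(true), cue.Hidden(true))
	if err != nil {
		panic(err)
	}
	for iter.Next() {
		// Should print labels like "#Config", "_hidden" and "name" without panicking.
		fmt.Println(safeLabel(iter.Selector()))
	}
}
```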
@@ -225,18 +225,25 @@ func (def *Definition) ToCUEString() (string, error) {
if err != nil {
return "", errors.Wrapf(err, "failed to parse template cue string")
}

// Extract imports before fix.File() clears them
importPaths := extractImportsFromFile(f)

f = fix.File(f)
var importDecls, templateDecls []ast.Decl
var templateDecls []ast.Decl
for _, decl := range f.Decls {
if importDecl, ok := decl.(*ast.ImportDecl); ok {
importDecls = append(importDecls, importDecl)
} else {
if _, ok := decl.(*ast.ImportDecl); !ok {
templateDecls = append(templateDecls, decl)
}
}
importString, err := encodeDeclsToString(importDecls)
if err != nil {
return "", errors.Wrapf(err, "failed to encode import decls")
var importString string
if len(importPaths) > 0 {
// Reconstruct import statements from extracted paths
var importLines []string
for _, importPath := range importPaths {
importLines = append(importLines, fmt.Sprintf("import %s", importPath))
}
importString = strings.Join(importLines, "\n") + "\n"
}
templateString, err = encodeDeclsToString(templateDecls)
if err != nil {

@@ -244,7 +251,12 @@ func (def *Definition) ToCUEString() (string, error) {
}
templateString = fmt.Sprintf("template: {\n%s}", templateString)

completeCUEString := importString + "\n" + metadataString + "\n" + templateString
var completeCUEString string
if importString != "" {
completeCUEString = importString + "\n" + metadataString + "\n" + templateString
} else {
completeCUEString = metadataString + "\n" + templateString
}
if completeCUEString, err = formatCUEString(completeCUEString); err != nil {
return "", errors.Wrapf(err, "failed to format cue format string")
}

@@ -647,17 +659,68 @@ func GetDefinitionDefaultSpec(kind string) map[string]interface{} {
return map[string]interface{}{}
}

// extractImportsFromFile extracts import paths from an AST file before fix.File() clears them.
// This is necessary because fix.File() removes import declarations that are not directly used.
// Returns a slice of import paths, where named imports are formatted as "name path".
func extractImportsFromFile(f *ast.File) []string {
var importPaths []string
for _, decl := range f.Decls {
if importDecl, ok := decl.(*ast.ImportDecl); ok {
for _, spec := range importDecl.Specs {
if spec.Path != nil {
importPath := spec.Path.Value
if spec.Name != nil {
// Handle named imports
importPaths = append(importPaths, fmt.Sprintf("%s %s", spec.Name.Name, importPath))
} else {
importPaths = append(importPaths, importPath)
}
}
}
}
}
return importPaths
}

func formatCUEString(cueString string) (string, error) {
f, err := parser.ParseFile("-", cueString, parser.ParseComments)
if err != nil {
return "", errors.Wrapf(err, "failed to parse file during format cue string")
}

// Extract imports before fix.File() clears them
importPaths := extractImportsFromFile(f)

n := fix.File(f)
b, err := format.Node(n, format.Simplify())
if err != nil {
return "", errors.Wrapf(err, "failed to format node during formating cue string")

// Format only non-import declarations
var nonImportDecls []ast.Decl
for _, decl := range n.Decls {
if _, ok := decl.(*ast.ImportDecl); !ok {
nonImportDecls = append(nonImportDecls, decl)
}
}
return string(b), nil

var result strings.Builder

// Add imports first
if len(importPaths) > 0 {
for _, importPath := range importPaths {
result.WriteString(fmt.Sprintf("import %s\n", importPath))
}
result.WriteString("\n")
}

// Format and add other declarations
if len(nonImportDecls) > 0 {
b, err := format.Node(&ast.File{Decls: nonImportDecls}, format.Simplify())
if err != nil {
return "", errors.Wrapf(err, "failed to format node during formating cue string")
}
result.WriteString(string(b))
}

return result.String(), nil
}

func findAndDecodeFieldByLabel(slit *ast.StructLit, targetLabel string) (*ast.Field, error) {
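A small, hypothetical sketch of the idea behind the change above: capture the import paths from the parsed CUE AST before calling fix.File(), since the change works around fix.File() dropping import declarations. The template string here is invented for illustration.

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue/ast"
	"cuelang.org/go/cue/parser"
	"cuelang.org/go/tools/fix"
)

func main() {
	const tpl = `
import "strings"

out: strings.ToUpper("hello")
`
	f, err := parser.ParseFile("-", tpl, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Collect the import paths first; after fix.File the ImportDecls may be gone.
	var imports []string
	for _, decl := range f.Decls {
		if imp, ok := decl.(*ast.ImportDecl); ok {
			for _, spec := range imp.Specs {
				imports = append(imports, spec.Path.Value)
			}
		}
	}
	f = fix.File(f)
	// Path values keep their surrounding quotes, e.g. "\"strings\"".
	fmt.Println("preserved imports:", imports)
}
```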
@@ -83,9 +83,9 @@ template: {
}
}
providerBasic: {
accessKey: string
secretKey: string
region: string
accessKey!: string
secretKey!: string
region!: string
}
#AlibabaProvider: {
providerBasic

@@ -137,5 +137,5 @@ template: {
type: "ucloud"
name: *"ucloud-provider" | string
}
parameter: *#AlibabaProvider | #AWSProvider | #AzureProvider | #BaiduProvider | #ECProvider | #GCPProvider | #TencentProvider | #UCloudProvider
parameter: #AlibabaProvider | #AWSProvider | #AzureProvider | #BaiduProvider | #ECProvider | #GCPProvider | #TencentProvider | #UCloudProvider
}
|||
|
|
@ -0,0 +1,261 @@
|
|||
/*
|
||||
Copyright 2022 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
workflowv1alpha1 "github.com/kubevela/workflow/api/v1alpha1"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
)
|
||||
|
||||
func TestApplicationMetricsWatcher(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
appRunning := &v1beta1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "app-running"},
|
||||
Status: common.AppStatus{
|
||||
Phase: common.ApplicationRunning,
|
||||
},
|
||||
}
|
||||
appRendering := &v1beta1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "app-rendering"},
|
||||
Status: common.AppStatus{
|
||||
Phase: common.ApplicationRendering,
|
||||
},
|
||||
}
|
||||
appWithWorkflow := &v1beta1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "app-with-workflow"},
|
||||
Status: common.AppStatus{
|
||||
Phase: common.ApplicationRunning,
|
||||
Workflow: &common.WorkflowStatus{
|
||||
Steps: []workflowv1alpha1.WorkflowStepStatus{
|
||||
{
|
||||
StepStatus: workflowv1alpha1.StepStatus{
|
||||
Name: "step1",
|
||||
Type: "apply-component",
|
||||
Phase: workflowv1alpha1.WorkflowStepPhaseSucceeded,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
appWithMixedWorkflow := &v1beta1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "app-with-mixed-workflow"},
|
||||
Status: common.AppStatus{
|
||||
Phase: common.ApplicationRunning,
|
||||
Workflow: &common.WorkflowStatus{
|
||||
Steps: []workflowv1alpha1.WorkflowStepStatus{
|
||||
{
|
||||
StepStatus: workflowv1alpha1.StepStatus{
|
||||
Name: "step1",
|
||||
Type: "apply-component",
|
||||
Phase: workflowv1alpha1.WorkflowStepPhaseSucceeded,
|
||||
},
|
||||
},
|
||||
{
|
||||
StepStatus: workflowv1alpha1.StepStatus{
|
||||
Name: "step2",
|
||||
Type: "apply-component",
|
||||
Phase: workflowv1alpha1.WorkflowStepPhaseFailed,
|
||||
},
|
||||
},
|
||||
{
|
||||
StepStatus: workflowv1alpha1.StepStatus{
|
||||
Name: "step3",
|
||||
Type: "suspend",
|
||||
Phase: workflowv1alpha1.WorkflowStepPhaseRunning,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
testCases := map[string]struct {
app *v1beta1.Application
op int
wantPC map[string]int
wantSC map[string]int
wantPD map[string]struct{}
wantSD map[string]struct{}
}{
"Add an application": {
app: appRunning,
op: 1,
wantPC: map[string]int{"running": 1},
wantSC: map[string]int{},
wantPD: map[string]struct{}{"running": {}},
wantSD: map[string]struct{}{},
},
"Add an application with workflow": {
app: appWithWorkflow,
op: 1,
wantPC: map[string]int{"running": 1},
wantSC: map[string]int{"apply-component/succeeded#": 1},
wantPD: map[string]struct{}{"running": {}},
wantSD: map[string]struct{}{"apply-component/succeeded#": {}},
},
"Delete an application": {
app: appRunning,
op: -1,
wantPC: map[string]int{"running": -1},
wantSC: map[string]int{},
wantPD: map[string]struct{}{"running": {}},
wantSD: map[string]struct{}{},
},
"Update an application": {
app: appRendering,
op: -1,
wantPC: map[string]int{"rendering": -1},
wantSC: map[string]int{},
wantPD: map[string]struct{}{"rendering": {}},
wantSD: map[string]struct{}{},
},
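// An empty Status.Phase is counted under the "-" placeholder (see the getPhase subtest below).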
"Nil app status": {
|
||||
app: &v1beta1.Application{},
|
||||
op: 1,
|
||||
wantPC: map[string]int{"-": 1},
|
||||
wantSC: map[string]int{},
|
||||
wantPD: map[string]struct{}{"-": {}},
|
||||
wantSD: map[string]struct{}{},
|
||||
},
|
||||
"Add an application with mixed workflow": {
|
||||
app: appWithMixedWorkflow,
|
||||
op: 1,
|
||||
wantPC: map[string]int{"running": 1},
|
||||
wantSC: map[string]int{
|
||||
"apply-component/succeeded#": 1,
|
||||
"apply-component/failed#": 1,
|
||||
"suspend/running#": 1,
|
||||
},
|
||||
wantPD: map[string]struct{}{"running": {}},
|
||||
wantSD: map[string]struct{}{
|
||||
"apply-component/succeeded#": {},
|
||||
"apply-component/failed#": {},
|
||||
"suspend/running#": {},
|
||||
},
|
||||
},
|
||||
"Empty workflow steps": {
|
||||
app: &v1beta1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "app-empty-workflow"},
|
||||
Status: common.AppStatus{
|
||||
Phase: common.ApplicationRunning,
|
||||
Workflow: &common.WorkflowStatus{
|
||||
Steps: []workflowv1alpha1.WorkflowStepStatus{},
|
||||
},
|
||||
},
|
||||
},
|
||||
op: 1,
|
||||
wantPC: map[string]int{"running": 1},
|
||||
wantSC: map[string]int{},
|
||||
wantPD: map[string]struct{}{"running": {}},
|
||||
wantSD: map[string]struct{}{},
|
||||
},
|
||||
"Unknown phase": {
|
||||
app: &v1beta1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "app-unknown-phase"},
|
||||
Status: common.AppStatus{
|
||||
Phase: "unknown",
|
||||
},
|
||||
},
|
||||
op: 1,
|
||||
wantPC: map[string]int{"unknown": 1},
|
||||
wantSC: map[string]int{},
|
||||
wantPD: map[string]struct{}{"unknown": {}},
|
||||
wantSD: map[string]struct{}{},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
watcher := &applicationMetricsWatcher{
|
||||
phaseCounter: map[string]int{},
|
||||
stepPhaseCounter: map[string]int{},
|
||||
phaseDirty: map[string]struct{}{},
|
||||
stepPhaseDirty: map[string]struct{}{},
|
||||
}
|
||||
watcher.inc(tc.app, tc.op)
|
||||
assert.Equal(t, tc.wantPC, watcher.phaseCounter)
|
||||
assert.Equal(t, tc.wantSC, watcher.stepPhaseCounter)
|
||||
assert.Equal(t, tc.wantPD, watcher.phaseDirty)
|
||||
assert.Equal(t, tc.wantSD, watcher.stepPhaseDirty)
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("Idempotence", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
watcher := &applicationMetricsWatcher{
|
||||
phaseCounter: map[string]int{},
|
||||
stepPhaseCounter: map[string]int{},
|
||||
phaseDirty: map[string]struct{}{},
|
||||
stepPhaseDirty: map[string]struct{}{},
|
||||
}
|
||||
watcher.inc(appRunning, 1)
|
||||
watcher.inc(appRunning, 1)
|
||||
assert.Equal(t, map[string]int{"running": 2}, watcher.phaseCounter)
|
||||
assert.Equal(t, map[string]struct{}{"running": {}}, watcher.phaseDirty)
|
||||
})
|
||||
|
||||
t.Run("Report should clear dirty flags", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
watcher := &applicationMetricsWatcher{
|
||||
phaseCounter: map[string]int{"running": 1},
|
||||
stepPhaseCounter: map[string]int{"apply-component/succeeded#": 1},
|
||||
phaseDirty: map[string]struct{}{"running": {}},
|
||||
stepPhaseDirty: map[string]struct{}{"apply-component/succeeded#": {}},
|
||||
}
|
||||
watcher.report()
|
||||
assert.Empty(t, watcher.phaseDirty)
|
||||
assert.Empty(t, watcher.stepPhaseDirty)
|
||||
assert.Equal(t, map[string]int{"running": 1}, watcher.phaseCounter)
|
||||
assert.Equal(t, map[string]int{"apply-component/succeeded#": 1}, watcher.stepPhaseCounter)
|
||||
})
|
||||
|
||||
t.Run("getPhase helper function", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
watcher := &applicationMetricsWatcher{}
|
||||
assert.Equal(t, "-", watcher.getPhase(""))
|
||||
assert.Equal(t, "running", watcher.getPhase("running"))
|
||||
})
|
||||
|
||||
t.Run("getApp helper function", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
watcher := &applicationMetricsWatcher{}
|
||||
inputApp := &v1beta1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "test-ns",
|
||||
},
|
||||
Status: common.AppStatus{
|
||||
Phase: common.ApplicationRunning,
|
||||
},
|
||||
}
|
||||
resultApp := watcher.getApp(inputApp)
|
||||
assert.NotNil(t, resultApp)
|
||||
assert.Equal(t, "test-app", resultApp.Name)
|
||||
assert.Equal(t, "test-ns", resultApp.Namespace)
|
||||
assert.Equal(t, common.ApplicationRunning, resultApp.Status.Phase)
|
||||
})
|
||||
}
|
||||
|
|
@ -17,13 +17,19 @@
|
|||
package velaql

import (
"context"
"fmt"
"os"
"path/filepath"
"testing"

"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestParseVelaQL(t *testing.T) {
t.Parallel()
testcases := []struct {
ql string
query QueryView
|
|
@ -67,19 +73,22 @@ func TestParseVelaQL(t *testing.T) {
|
|||
err: nil,
}}

for _, testcase := range testcases {
q, err := ParseVelaQL(testcase.ql)
assert.Equal(t, testcase.err != nil, err != nil)
if err == nil {
assert.Equal(t, testcase.query.View, q.View)
assert.Equal(t, testcase.query.Export, q.Export)
} else {
assert.Equal(t, testcase.err.Error(), err.Error())
}
for i, testcase := range testcases {
t.Run(fmt.Sprintf("testcase-%d", i), func(t *testing.T) {
q, err := ParseVelaQL(testcase.ql)
if testcase.err != nil {
assert.EqualError(t, err, testcase.err.Error())
} else {
assert.NoError(t, err)
assert.Equal(t, testcase.query.View, q.View)
assert.Equal(t, testcase.query.Export, q.Export)
}
})
}
}

func TestParseParameter(t *testing.T) {
t.Parallel()
testcases := []struct {
parameter string
parameterMap map[string]interface{}
|
|
@ -122,15 +131,114 @@ func TestParseParameter(t *testing.T) {
|
|||
err: nil,
}}

for _, testcase := range testcases {
result, err := ParseParameter(testcase.parameter)
assert.Equal(t, testcase.err != nil, err != nil)
if err == nil {
for k, v := range result {
assert.Equal(t, testcase.parameterMap[k], v)
for i, testcase := range testcases {
t.Run(fmt.Sprintf("testcase-%d", i), func(t *testing.T) {
result, err := ParseParameter(testcase.parameter)
if testcase.err != nil {
assert.EqualError(t, err, testcase.err.Error())
} else {
assert.NoError(t, err)
for k, v := range result {
assert.Equal(t, testcase.parameterMap[k], v)
}
}
} else {
assert.Equal(t, testcase.err.Error(), err.Error())
}
})
}
}

func TestParseVelaQLFromPath(t *testing.T) {
t.Parallel()
ctx := context.Background()

testdataDir := "testdata"

testcases := []struct {
name string
path string
expectedExport string
expectError bool
errorContains string
}{
{
name: "Simple valid CUE file with export field",
path: filepath.Join(testdataDir, "simple-valid.cue"),
expectedExport: "output.message",
expectError: false,
},
{
name: "Simple valid CUE file without export field",
path: filepath.Join(testdataDir, "simple-no-export.cue"),
expectedExport: DefaultExportValue,
expectError: false,
},
{
name: "Nonexistent file path",
path: filepath.Join(testdataDir, "nonexistent.cue"),
expectError: true,
errorContains: "read view file from",
},
{
name: "Empty file path",
path: "",
expectError: true,
errorContains: "read view file from",
},
{
name: "Invalid CUE content",
path: filepath.Join(testdataDir, "invalid-cue-content.cue"),
expectError: true,
errorContains: "error when parsing view",
},
{
name: "File with invalid export type - should fallback to default",
path: filepath.Join(testdataDir, "invalid-export.cue"),
expectedExport: DefaultExportValue,
expectError: false,
},
{
name: "Empty CUE file",
path: filepath.Join(testdataDir, "empty.cue"),
expectedExport: DefaultExportValue,
expectError: false,
},
{
name: "File with leading/trailing whitespace",
path: filepath.Join(testdataDir, "whitespace.cue"),
expectedExport: "output.message",
expectError: false,
},
{
name: "Relative path",
path: "testdata/nonexistent.cue",
expectError: true,
errorContains: "read view file from",
},
}

for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
result, err := ParseVelaQLFromPath(ctx, tc.path)

if tc.expectError {
assert.Error(t, err)
assert.Nil(t, result)
if tc.errorContains != "" {
assert.Contains(t, err.Error(), tc.errorContains)
}
} else {
assert.NoError(t, err)
assert.NotNil(t, result)

if tc.path != "" {
expectedContent, readErr := os.ReadFile(tc.path)
require.NoError(t, readErr)
assert.Equal(t, string(expectedContent), result.View)
}

assert.Equal(t, tc.expectedExport, result.Export)
assert.Nil(t, result.Parameter)
}
})
}
}
|
|
|
|||
|
|
@ -0,0 +1,4 @@
|
|||
invalid cue syntax [
missing: colon
invalid: brackets
}
|
|
@ -0,0 +1,9 @@
|
|||
parameter: {
name: "test"
}

output: {
message: "hello " + parameter.name
}

export: 123 // Invalid export type (should be string)
|
|
@ -0,0 +1,7 @@
|
|||
parameter: {
name: "test"
}

output: {
message: "hello " + parameter.name
}
|
|
@ -0,0 +1,9 @@
|
|||
parameter: {
name: "test"
}

output: {
message: "hello " + parameter.name
}

export: "output.message"
|
|
@ -0,0 +1,12 @@
|
|||


parameter: {
name: "test"
}

output: {
message: "hello " + parameter.name
}

export: "output.message"

|
|
@ -29,12 +29,132 @@ import (
|
|||
dynamicfake "k8s.io/client-go/dynamic/fake"
|
||||
|
||||
"cuelang.org/go/cue/errors"
|
||||
"github.com/crossplane/crossplane-runtime/pkg/test"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1beta1/core"
|
||||
)
|
||||
|
||||
func TestValidateDefinitionRevision(t *testing.T) {
|
||||
t.Parallel()
|
||||
scheme := runtime.NewScheme()
|
||||
v1beta1.AddToScheme(scheme)
|
||||
|
||||
baseCompDef := &v1beta1.ComponentDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-def",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1beta1.ComponentDefinitionSpec{
|
||||
Workload: common.WorkloadTypeDescriptor{
|
||||
Definition: common.WorkloadGVK{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
},
|
||||
Schematic: &common.Schematic{
|
||||
CUE: &common.CUE{
|
||||
Template: `
|
||||
output: {
|
||||
apiVersion: "apps/v1"
|
||||
kind: "Deployment"
|
||||
metadata: name: context.name
|
||||
}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expectedDefRev, _, err := core.GatherRevisionInfo(baseCompDef)
|
||||
assert.NoError(t, err, "Setup: failed to gather revision info")
|
||||
expectedDefRev.Name = "test-def-v1"
|
||||
expectedDefRev.Namespace = "default"
|
||||
|
||||
mismatchedHashDefRev := expectedDefRev.DeepCopy()
|
||||
mismatchedHashDefRev.Spec.RevisionHash = "different-hash"
|
||||
|
||||
mismatchedSpecDefRev := expectedDefRev.DeepCopy()
|
||||
mismatchedSpecDefRev.Spec.ComponentDefinition.Spec.Workload.Definition.Kind = "StatefulSet"
|
||||
|
||||
// tweakedCompDef := baseCompDef.DeepCopy()
|
||||
// tweakedCompDef.Spec.Schematic.CUE.Template = `
|
||||
// output: {
|
||||
// apiVersion: "apps/v1"
|
||||
// kind: "Deployment"
|
||||
// metadata: name: context.name
|
||||
// // a tweak
|
||||
// }`
|
||||
testCases := map[string]struct {
|
||||
def runtime.Object
|
||||
defRevName types.NamespacedName
|
||||
existingObjs []runtime.Object
|
||||
expectErr bool
|
||||
expectedErrContains string
|
||||
}{
|
||||
"Success with matching definition revision": {
|
||||
def: baseCompDef,
|
||||
defRevName: types.NamespacedName{Name: "test-def-v1", Namespace: "default"},
|
||||
existingObjs: []runtime.Object{expectedDefRev},
|
||||
expectErr: false,
|
||||
},
|
||||
"Success when definition revision does not exist": {
|
||||
def: baseCompDef,
|
||||
defRevName: types.NamespacedName{Name: "test-def-v1", Namespace: "default"},
|
||||
existingObjs: []runtime.Object{},
|
||||
expectErr: false,
|
||||
},
|
||||
"Failure with revision hash mismatch": {
|
||||
def: baseCompDef,
|
||||
defRevName: types.NamespacedName{Name: "test-def-v1", Namespace: "default"},
|
||||
existingObjs: []runtime.Object{mismatchedHashDefRev},
|
||||
expectErr: true,
|
||||
expectedErrContains: "the definition's spec is different with existing definitionRevision's spec",
|
||||
},
|
||||
"Failure with spec mismatch (DeepEqual)": {
|
||||
def: baseCompDef,
|
||||
defRevName: types.NamespacedName{Name: "test-def-v1", Namespace: "default"},
|
||||
existingObjs: []runtime.Object{mismatchedSpecDefRev},
|
||||
expectErr: true,
|
||||
expectedErrContains: "the definition's spec is different with existing definitionRevision's spec",
|
||||
},
|
||||
"Failure with invalid definition revision name": {
|
||||
def: baseCompDef,
|
||||
defRevName: types.NamespacedName{Name: "invalid!name", Namespace: "default"},
|
||||
existingObjs: []runtime.Object{},
|
||||
expectErr: true,
|
||||
expectedErrContains: "invalid definitionRevision name",
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
cli := fake.NewClientBuilder().
|
||||
WithScheme(scheme).
|
||||
WithRuntimeObjects(tc.existingObjs...).
|
||||
Build()
|
||||
|
||||
err := ValidateDefinitionRevision(context.Background(), cli, tc.def, tc.defRevName)
|
||||
|
||||
if tc.expectErr {
|
||||
assert.Error(t, err)
|
||||
if tc.expectedErrContains != "" {
|
||||
assert.Contains(t, err.Error(), tc.expectedErrContains)
|
||||
}
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateCueTemplate(t *testing.T) {
|
||||
t.Parallel()
|
||||
cases := map[string]struct {
|
||||
cueTemplate string
|
||||
want error
|
||||
|
|
@ -66,19 +186,31 @@ func TestValidateCueTemplate(t *testing.T) {
|
|||
}`,
want: errors.New("output.hello: reference \"world\" not found"),
},
"emptyCueTemp": {
cueTemplate: "",
want: nil,
},
"malformedCueTemp": {
cueTemplate: "output: { metadata: { name: context.name, label: context.label, annotation: \"default\" }, hello: world ",
want: errors.New("expected '}', found 'EOF'"),
},
}

for caseName, cs := range cases {
t.Run(caseName, func(t *testing.T) {
t.Parallel()
err := ValidateCueTemplate(cs.cueTemplate)
if diff := cmp.Diff(cs.want, err, test.EquateErrors()); diff != "" {
t.Errorf("\n%s\nValidateCueTemplate: -want , +got \n%s\n", cs.want, diff)
if cs.want != nil {
assert.EqualError(t, err, cs.want.Error())
} else {
assert.NoError(t, err)
}
})
}
}

func TestValidateCuexTemplate(t *testing.T) {
t.Parallel()
cases := map[string]struct {
cueTemplate string
want error
|
|
@ -164,15 +296,19 @@ func TestValidateCuexTemplate(t *testing.T) {
|
|||

for caseName, cs := range cases {
t.Run(caseName, func(t *testing.T) {
t.Parallel()
err := ValidateCuexTemplate(context.Background(), cs.cueTemplate)
if diff := cmp.Diff(cs.want, err, test.EquateErrors()); diff != "" {
t.Errorf("\n%s\nValidateCueTemplate: -want , +got \n%s\n", cs.want, diff)
if cs.want != nil {
assert.Equal(t, cs.want.Error(), err.Error())
} else {
assert.NoError(t, err)
}
})
}
}

func TestValidateSemanticVersion(t *testing.T) {
t.Parallel()
cases := map[string]struct {
version string
want error
|
|
@ -192,18 +328,20 @@ func TestValidateSemanticVersion(t *testing.T) {
|
|||
}
for caseName, cs := range cases {
t.Run(caseName, func(t *testing.T) {
t.Parallel()
err := ValidateSemanticVersion(cs.version)
if cs.want != nil {
assert.Equal(t, err.Error(), cs.want.Error())
assert.Error(t, err)
assert.EqualError(t, err, cs.want.Error())
} else {
assert.Equal(t, err, cs.want)
assert.NoError(t, err)
}

})
}
}

func TestValidateMultipleDefVersionsNotPresent(t *testing.T) {
t.Parallel()
cases := map[string]struct {
version string
revisionName string
|
|
@ -227,11 +365,13 @@ func TestValidateMultipleDefVersionsNotPresent(t *testing.T) {
|
|||
}
for caseName, cs := range cases {
t.Run(caseName, func(t *testing.T) {
t.Parallel()
err := ValidateMultipleDefVersionsNotPresent(cs.version, cs.revisionName, "ComponentDefinition")
if cs.want != nil {
assert.Equal(t, err.Error(), cs.want.Error())
assert.Error(t, err)
assert.EqualError(t, err, cs.want.Error())
} else {
assert.Equal(t, err, cs.want)
assert.NoError(t, err)
}

})
|
|
|
|||
|
|
@ -48,6 +48,7 @@ func setupClient(ctx context.Context, t *testing.T) client.Client {
|
|||
}

func TestParser(t *testing.T) {
t.Parallel()
r := require.New(t)
ctx := context.Background()
act := &mock.Action{}
|
|
@ -106,7 +107,67 @@ func TestParser(t *testing.T) {
|
|||
r.Equal(act.Phase, "Wait")
|
||||
}
|
||||
|
||||
func TestRenderComponent(t *testing.T) {
|
||||
t.Parallel()
|
||||
r := require.New(t)
|
||||
ctx := context.Background()
|
||||
cuectx := cuecontext.New()
|
||||
cli := setupClient(ctx, t)
|
||||
|
||||
v := cuectx.CompileString(`$params: {
|
||||
value: {
|
||||
name: "test-render",
|
||||
type: "webservice",
|
||||
}
|
||||
}`)
|
||||
r.NoError(v.Err())
|
||||
|
||||
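// mockComponentRender stands in for the ComponentRender hook: it returns one
// Deployment workload plus one Service trait labelled trait.oam.dev/resource=mytrait.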
mockComponentRender := func(ctx context.Context, comp common.ApplicationComponent, patcher *cue.Value, clusterName string, overrideNamespace string) (*unstructured.Unstructured, []*unstructured.Unstructured, error) {
r.Equal("test-render", comp.Name)
workload := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": map[string]interface{}{
"name": "test-workload",
},
},
}
trait := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "v1",
"kind": "Service",
"metadata": map[string]interface{}{
"name": "test-trait",
"labels": map[string]interface{}{
"trait.oam.dev/resource": "mytrait",
},
},
},
}
return workload, []*unstructured.Unstructured{trait}, nil
}

res, err := RenderComponent(ctx, &oamprovidertypes.Params[cue.Value]{
Params: v,
RuntimeParams: oamprovidertypes.RuntimeParams{
KubeClient: cli,
ComponentRender: mockComponentRender,
},
})
r.NoError(err)

output, err := res.LookupPath(cue.ParsePath("$returns.output.metadata.name")).String()
r.NoError(err)
r.Equal("test-workload", output)

outputs, err := res.LookupPath(cue.ParsePath("$returns.outputs.mytrait.metadata.name")).String()
r.NoError(err)
r.Equal("test-workload", outputs)
|
||||
}

func TestLoadComponent(t *testing.T) {
t.Parallel()
r := require.New(t)
ctx := context.Background()
act := &mock.Action{}
|
|
@ -171,6 +232,7 @@ func TestLoadComponent(t *testing.T) {
|
|||
}

func TestLoadComponentInOrder(t *testing.T) {
t.Parallel()
r := require.New(t)
ctx := context.Background()
act := &mock.Action{}
|
|
|
|||
|
|
@ -15,3 +15,317 @@
|
|||
*/

package query

import (
"testing"

"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

"github.com/oam-dev/kubevela/pkg/oam"
querytypes "github.com/oam-dev/kubevela/pkg/utils/types"
)

func TestBuildResourceArray(t *testing.T) {
t.Parallel()
// Define common objects used across tests
pod1 := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "v1",
"kind": "Pod",
"metadata": map[string]interface{}{
"name": "pod1",
},
},
}
pod2 := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "v1",
"kind": "Pod",
"metadata": map[string]interface{}{
"name": "pod2",
"annotations": map[string]interface{}{
oam.AnnotationPublishVersion: "v2.0.0-pod",
oam.AnnotationDeployVersion: "rev2-pod",
},
},
},
}
deployment := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": map[string]interface{}{
"name": "my-app",
},
},
}

// Define common tree nodes
parentWorkloadNode := &querytypes.ResourceTreeNode{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: "my-app",
Namespace: "default",
Object: deployment,
}

pod1Node := &querytypes.ResourceTreeNode{
APIVersion: "v1",
Kind: "Pod",
Name: "pod1",
Namespace: "default",
Object: pod1,
}

pod2Node := &querytypes.ResourceTreeNode{
APIVersion: "v1",
Kind: "Pod",
Name: "pod2",
Namespace: "default",
Object: pod2,
}

replicaSetNode := &querytypes.ResourceTreeNode{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: "my-app-rs",
Namespace: "default",
LeafNodes: []*querytypes.ResourceTreeNode{pod1Node, pod2Node},
}

// Define test cases
testCases := map[string]struct {
res querytypes.AppliedResource
parent *querytypes.ResourceTreeNode
node *querytypes.ResourceTreeNode
kind string
apiVersion string
expected []querytypes.ResourceItem
}{
"simple case with one matching pod": {
res: querytypes.AppliedResource{
Cluster: "local",
Component: "my-comp",
PublishVersion: "v1.0.0",
DeployVersion: "rev1",
},
parent: parentWorkloadNode,
node: pod1Node,
kind: "Pod",
apiVersion: "v1",
expected: []querytypes.ResourceItem{
{
Cluster: "local",
Component: "my-comp",
Workload: querytypes.Workload{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: "my-app",
Namespace: "default",
},
Object: pod1,
PublishVersion: "v1.0.0",
DeployVersion: "rev1",
},
},
},
"nested case with multiple matching pods": {
res: querytypes.AppliedResource{
Cluster: "remote",
Component: "my-comp-2",
PublishVersion: "v2.0.0",
DeployVersion: "rev2",
},
parent: parentWorkloadNode,
node: replicaSetNode,
kind: "Pod",
apiVersion: "v1",
expected: []querytypes.ResourceItem{
{
Cluster: "remote",
Component: "my-comp-2",
Workload: querytypes.Workload{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: "my-app-rs",
Namespace: "default",
},
Object: pod1,
PublishVersion: "v2.0.0",
DeployVersion: "rev2",
},
{
Cluster: "remote",
Component: "my-comp-2",
Workload: querytypes.Workload{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: "my-app-rs",
Namespace: "default",
},
Object: pod2,
PublishVersion: "v2.0.0-pod", // From annotation
DeployVersion: "rev2-pod", // From annotation
},
},
},
"no matching nodes": {
res: querytypes.AppliedResource{},
parent: parentWorkloadNode,
node: pod1Node,
kind: "Service",
apiVersion: "v1",
expected: nil,
},
"empty node": {
res: querytypes.AppliedResource{},
parent: parentWorkloadNode,
node: &querytypes.ResourceTreeNode{},
kind: "Pod",
apiVersion: "v1",
expected: nil,
},
"complex tree with mixed resources": {
res: querytypes.AppliedResource{
Cluster: "local",
Component: "my-comp",
PublishVersion: "v1.0.0",
DeployVersion: "rev1",
},
parent: parentWorkloadNode,
node: &querytypes.ResourceTreeNode{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: "my-app-rs",
Namespace: "default",
LeafNodes: []*querytypes.ResourceTreeNode{
pod1Node,
{
APIVersion: "v1",
Kind: "Service",
Name: "my-service",
},
pod2Node,
},
},
kind: "Pod",
apiVersion: "v1",
expected: []querytypes.ResourceItem{
{
Cluster: "local",
Component: "my-comp",
Workload: querytypes.Workload{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: "my-app-rs",
Namespace: "default",
},
Object: pod1,
PublishVersion: "v1.0.0",
DeployVersion: "rev1",
},
{
Cluster: "local",
Component: "my-comp",
Workload: querytypes.Workload{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: "my-app-rs",
Namespace: "default",
},
Object: pod2,
PublishVersion: "v2.0.0-pod",
DeployVersion: "rev2-pod",
},
},
},
"case-insensitive matching": {
res: querytypes.AppliedResource{
Cluster: "local",
Component: "my-comp",
PublishVersion: "v1.0.0",
DeployVersion: "rev1",
},
parent: parentWorkloadNode,
node: pod1Node,
kind: "pod",
apiVersion: "V1",
expected: nil,
},
}

for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
t.Parallel()
result := buildResourceArray(tc.res, tc.parent, tc.node, tc.kind, tc.apiVersion)
assert.ElementsMatch(t, tc.expected, result, "The returned resource items should match the expected ones")
})
}
}

func TestBuildResourceItem(t *testing.T) {
t.Parallel()
pod := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "v1",
"kind": "Pod",
"metadata": map[string]interface{}{
"name": "test-pod",
},
},
}
podWithAnnotations := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "v1",
"kind": "Pod",
"metadata": map[string]interface{}{
"name": "test-pod-annotated",
"annotations": map[string]interface{}{
oam.AnnotationPublishVersion: "v2.0.0-annotated",
oam.AnnotationDeployVersion: "rev2-annotated",
},
},
},
}
res := querytypes.AppliedResource{
Cluster: "test-cluster",
Component: "test-comp",
PublishVersion: "v1.0.0-res",
DeployVersion: "rev1-res",
}
workload := querytypes.Workload{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: "test-workload",
Namespace: "test-ns",
}

t.Run("without annotations", func(t *testing.T) {
t.Parallel()
item := buildResourceItem(res, workload, pod)
assert.Equal(t, "test-cluster", item.Cluster)
assert.Equal(t, "test-comp", item.Component)
assert.Equal(t, workload, item.Workload)
assert.Equal(t, pod, item.Object)
assert.Equal(t, "v1.0.0-res", item.PublishVersion)
assert.Equal(t, "rev1-res", item.DeployVersion)
})

t.Run("with annotations", func(t *testing.T) {
t.Parallel()
item := buildResourceItem(res, workload, podWithAnnotations)
assert.Equal(t, "test-cluster", item.Cluster)
assert.Equal(t, "test-comp", item.Component)
assert.Equal(t, workload, item.Workload)
assert.Equal(t, podWithAnnotations, item.Object)
assert.Equal(t, "v2.0.0-annotated", item.PublishVersion)
assert.Equal(t, "rev2-annotated", item.DeployVersion)
})

t.Run("annotation override", func(t *testing.T) {
item := buildResourceItem(res, workload, podWithAnnotations)
assert.Equal(t, "v2.0.0-annotated", item.PublishVersion)
assert.Equal(t, "rev2-annotated", item.DeployVersion)
})
}
|
|
|
|||
|
|
@ -138,7 +138,7 @@ import (
|
|||
metadata: name: r.metadata.name
}]
},
for r in resourceMap.workload + resourceMap.service {
for r in list.Concat([resourceMap.workload, resourceMap.service]) {
type: "k8s-objects"
name: _prefix + strings.ToLower("\(r.kind)-\(r.metadata.name)")
properties: objects: [{
|
|
|
|||
|
|
@ -322,7 +322,7 @@ func (d *debugOpts) separateBySteps(v cue.Value, ioStreams cmdutil.IOStreams) er
|
|||
if it.Value().IncompleteKind() == cue.BottomKind {
break
}
fieldName := it.Label()
fieldName := it.Selector().String()
fieldList = append(fieldList, fieldName)
fieldMap[fieldName] = it.Value()
}
|
|
|
|||
Some files were not shown because too many files have changed in this diff.