diff --git a/PROJECT b/PROJECT
index d89f2c28c43..01f0bd6bb87 100644
--- a/PROJECT
+++ b/PROJECT
@@ -246,4 +246,13 @@ resources:
kind: StorageProvider
path: github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1
version: v1alpha1
+- api:
+ crdVersion: v1
+ namespaced: true
+ controller: true
+ domain: kubeblocks.io
+ group: experimental
+ kind: NodeCountScaler
+ path: github.com/apecloud/kubeblocks/apis/experimental/v1alpha1
+ version: v1alpha1
version: "3"
diff --git a/apis/experimental/doc.go b/apis/experimental/doc.go
new file mode 100644
index 00000000000..e6e399d1139
--- /dev/null
+++ b/apis/experimental/doc.go
@@ -0,0 +1,29 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package experimental is a group of APIs that are in the experimental stage.
+
+We often encounter needs and ideas from community users, customers, or internal discussions.
+These ideas or features typically require a lot of preparatory work before they can be officially supported in KubeBlocks.
+This includes requirements analysis, solution research, API design and discussions, solution design and discussions, and so on.
+This process often takes a considerable amount of time.
+
+To quickly validate the feasibility of feature functionalities, an experimental API is now added to KubeBlocks.
+This API is used for rapidly verifying the feasibility of a specific feature.
+Please note that experimental APIs do not guarantee backward compatibility.
+*/
+package experimental
diff --git a/apis/experimental/v1alpha1/groupversion_info.go b/apis/experimental/v1alpha1/groupversion_info.go
new file mode 100644
index 00000000000..9659fce5377
--- /dev/null
+++ b/apis/experimental/v1alpha1/groupversion_info.go
@@ -0,0 +1,39 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+// Package v1alpha1 contains API Schema definitions for the experimental v1alpha1 API group
+// +kubebuilder:object:generate=true
+// +groupName=experimental.kubeblocks.io
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "experimental.kubeblocks.io", Version: "v1alpha1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/apis/experimental/v1alpha1/nodecountscaler_types.go b/apis/experimental/v1alpha1/nodecountscaler_types.go
new file mode 100644
index 00000000000..e317752551d
--- /dev/null
+++ b/apis/experimental/v1alpha1/nodecountscaler_types.go
@@ -0,0 +1,128 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// NodeCountScalerSpec defines the desired state of NodeCountScaler
+type NodeCountScalerSpec struct {
+ // Specified the target Cluster name this scaler applies to.
+ TargetClusterName string `json:"targetClusterName"`
+
+ // Specified the target Component names this scaler applies to.
+ // All Components will be applied if not set.
+ //
+ // +optional
+ TargetComponentNames []string `json:"targetComponentNames,omitempty"`
+}
+
+// NodeCountScalerStatus defines the observed state of NodeCountScaler
+type NodeCountScalerStatus struct {
+ // Records the current status information of all Components specified in the NodeCountScalerSpec.
+ //
+ // +optional
+ ComponentStatuses []ComponentStatus `json:"componentStatuses,omitempty"`
+
+ // Represents the latest available observations of a nodecountscaler's current state.
+ // Known .status.conditions.type are: "ScaleReady".
+ // ScaleReady - All target components are ready.
+ //
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // LastScaleTime is the last time the NodeCountScaler scaled the number of instances.
+ //
+ // +optional
+ LastScaleTime metav1.Time `json:"lastScaleTime,omitempty"`
+}
+
+type ComponentStatus struct {
+ // Specified the Component name.
+ Name string `json:"name"`
+
+ // The current number of instances of this component.
+ CurrentReplicas int32 `json:"currentReplicas"`
+
+ // The number of instances of this component with a Ready condition.
+ ReadyReplicas int32 `json:"readyReplicas"`
+
+ // The number of instances of this component with a Ready condition for at least MinReadySeconds defined in the instance template.
+ AvailableReplicas int32 `json:"availableReplicas"`
+
+ // The desired number of instances of this component.
+ // Usually, it should be the number of nodes.
+ DesiredReplicas int32 `json:"desiredReplicas"`
+}
+
+type ConditionType string
+
+const (
+ // ScaleReady is added to a nodecountscaler when all target components are ready.
+ ScaleReady ConditionType = "ScaleReady"
+)
+
+const (
+ // ReasonNotReady is a reason for condition ScaleReady.
+ ReasonNotReady = "NotReady"
+
+ // ReasonReady is a reason for condition ScaleReady.
+ ReasonReady = "Ready"
+)
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:categories={kubeblocks,all},shortName=ncs
+// +kubebuilder:printcolumn:name="TARGET-CLUSTER-NAME",type="string",JSONPath=".spec.targetClusterName",description="target cluster name."
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].status",description="scale ready."
+// +kubebuilder:printcolumn:name="REASON",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].reason",description="reason."
+// +kubebuilder:printcolumn:name="MESSAGE",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].message",description="message."
+// +kubebuilder:printcolumn:name="LAST-SCALE-TIME",type="date",JSONPath=".status.lastScaleTime"
+
+// NodeCountScaler is the Schema for the nodecountscalers API
+type NodeCountScaler struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec NodeCountScalerSpec `json:"spec,omitempty"`
+ Status NodeCountScalerStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// NodeCountScalerList contains a list of NodeCountScaler
+type NodeCountScalerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []NodeCountScaler `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&NodeCountScaler{}, &NodeCountScalerList{})
+}
diff --git a/apis/experimental/v1alpha1/zz_generated.deepcopy.go b/apis/experimental/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..71393796dc8
--- /dev/null
+++ b/apis/experimental/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,152 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus.
+func (in *ComponentStatus) DeepCopy() *ComponentStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ComponentStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeCountScaler) DeepCopyInto(out *NodeCountScaler) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCountScaler.
+func (in *NodeCountScaler) DeepCopy() *NodeCountScaler {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeCountScaler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeCountScaler) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeCountScalerList) DeepCopyInto(out *NodeCountScalerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]NodeCountScaler, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCountScalerList.
+func (in *NodeCountScalerList) DeepCopy() *NodeCountScalerList {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeCountScalerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeCountScalerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeCountScalerSpec) DeepCopyInto(out *NodeCountScalerSpec) {
+ *out = *in
+ if in.TargetComponentNames != nil {
+ in, out := &in.TargetComponentNames, &out.TargetComponentNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCountScalerSpec.
+func (in *NodeCountScalerSpec) DeepCopy() *NodeCountScalerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeCountScalerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeCountScalerStatus) DeepCopyInto(out *NodeCountScalerStatus) {
+ *out = *in
+ if in.ComponentStatuses != nil {
+ in, out := &in.ComponentStatuses, &out.ComponentStatuses
+ *out = make([]ComponentStatus, len(*in))
+ copy(*out, *in)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.LastScaleTime.DeepCopyInto(&out.LastScaleTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCountScalerStatus.
+func (in *NodeCountScalerStatus) DeepCopy() *NodeCountScalerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeCountScalerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
index d374ddbaf8b..4514510f429 100644
--- a/cmd/manager/main.go
+++ b/cmd/manager/main.go
@@ -52,12 +52,14 @@ import (
appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
appsv1beta1 "github.com/apecloud/kubeblocks/apis/apps/v1beta1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
+ experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1"
storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1"
"github.com/apecloud/kubeblocks/apis/workloads/legacy"
workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
appscontrollers "github.com/apecloud/kubeblocks/controllers/apps"
"github.com/apecloud/kubeblocks/controllers/apps/configuration"
+ experimentalcontrollers "github.com/apecloud/kubeblocks/controllers/experimental"
extensionscontrollers "github.com/apecloud/kubeblocks/controllers/extensions"
k8scorecontrollers "github.com/apecloud/kubeblocks/controllers/k8score"
workloadscontrollers "github.com/apecloud/kubeblocks/controllers/workloads"
@@ -81,9 +83,10 @@ const (
leaderElectIDFlagKey flagName = "leader-elect-id"
// switch flags key for API groups
- appsFlagKey flagName = "apps"
- extensionsFlagKey flagName = "extensions"
- workloadsFlagKey flagName = "workloads"
+ appsFlagKey flagName = "apps"
+ extensionsFlagKey flagName = "extensions"
+ workloadsFlagKey flagName = "workloads"
+ experimentalFlagKey flagName = "experimental"
multiClusterKubeConfigFlagKey flagName = "multi-cluster-kubeconfig"
multiClusterContextsFlagKey flagName = "multi-cluster-contexts"
@@ -108,6 +111,7 @@ func init() {
utilruntime.Must(appsv1beta1.AddToScheme(scheme))
utilruntime.Must(legacy.AddToScheme(scheme))
utilruntime.Must(apiextv1.AddToScheme(scheme))
+ utilruntime.Must(experimentalv1alpha1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
viper.SetConfigName("config") // name of config file (without extension)
@@ -168,6 +172,8 @@ func setupFlags() {
"Enable the extensions controller manager.")
flag.Bool(workloadsFlagKey.String(), true,
"Enable the workloads controller manager.")
+ flag.Bool(experimentalFlagKey.String(), false,
+ "Enable the experimental controller manager.")
flag.String(multiClusterKubeConfigFlagKey.String(), "", "Paths to the kubeconfig for multi-cluster accessing.")
flag.String(multiClusterContextsFlagKey.String(), "", "Kube contexts the manager will talk to.")
@@ -510,6 +516,17 @@ func main() {
os.Exit(1)
}
}
+
+ if viper.GetBool(experimentalFlagKey.viperName()) {
+ if err = (&experimentalcontrollers.NodeCountScalerReconciler{
+ Client: mgr.GetClient(),
+ Scheme: mgr.GetScheme(),
+ Recorder: mgr.GetEventRecorderFor("node-count-scaler-controller"),
+ }).SetupWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "NodeCountScaler")
+ os.Exit(1)
+ }
+ }
// +kubebuilder:scaffold:builder
if viper.GetBool("enable_webhooks") {
diff --git a/config/crd/bases/experimental.kubeblocks.io_nodecountscalers.yaml b/config/crd/bases/experimental.kubeblocks.io_nodecountscalers.yaml
new file mode 100644
index 00000000000..73eba8e34ac
--- /dev/null
+++ b/config/crd/bases/experimental.kubeblocks.io_nodecountscalers.yaml
@@ -0,0 +1,199 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.12.1
+ labels:
+ app.kubernetes.io/name: kubeblocks
+ name: nodecountscalers.experimental.kubeblocks.io
+spec:
+ group: experimental.kubeblocks.io
+ names:
+ categories:
+ - kubeblocks
+ - all
+ kind: NodeCountScaler
+ listKind: NodeCountScalerList
+ plural: nodecountscalers
+ shortNames:
+ - ncs
+ singular: nodecountscaler
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: target cluster name.
+ jsonPath: .spec.targetClusterName
+ name: TARGET-CLUSTER-NAME
+ type: string
+ - description: scale ready.
+ jsonPath: .status.conditions[?(@.type=="ScaleReady")].status
+ name: READY
+ type: string
+ - description: reason.
+ jsonPath: .status.conditions[?(@.type=="ScaleReady")].reason
+ name: REASON
+ type: string
+ - description: message.
+ jsonPath: .status.conditions[?(@.type=="ScaleReady")].message
+ name: MESSAGE
+ type: string
+ - jsonPath: .status.lastScaleTime
+ name: LAST-SCALE-TIME
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: NodeCountScaler is the Schema for the nodecountscalers API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NodeCountScalerSpec defines the desired state of NodeCountScaler
+ properties:
+ targetClusterName:
+ description: Specified the target Cluster name this scaler applies
+ to.
+ type: string
+ targetComponentNames:
+ description: Specified the target Component names this scaler applies
+ to. All Components will be applied if not set.
+ items:
+ type: string
+ type: array
+ required:
+ - targetClusterName
+ type: object
+ status:
+ description: NodeCountScalerStatus defines the observed state of NodeCountScaler
+ properties:
+ componentStatuses:
+ description: Records the current status information of all Components
+ specified in the NodeCountScalerSpec.
+ items:
+ properties:
+ availableReplicas:
+ description: The number of instances of this component with
+ a Ready condition for at least MinReadySeconds defined in
+ the instance template.
+ format: int32
+ type: integer
+ currentReplicas:
+ description: The current number of instances of this component.
+ format: int32
+ type: integer
+ desiredReplicas:
+ description: The desired number of instances of this component.
+ Usually, it should be the number of nodes.
+ format: int32
+ type: integer
+ name:
+ description: Specified the Component name.
+ type: string
+ readyReplicas:
+ description: The number of instances of this component with
+ a Ready condition.
+ format: int32
+ type: integer
+ required:
+ - availableReplicas
+ - currentReplicas
+ - desiredReplicas
+ - name
+ - readyReplicas
+ type: object
+ type: array
+ conditions:
+ description: 'Represents the latest available observations of a nodecountscaler''s
+ current state. Known .status.conditions.type are: "ScaleReady".
+ ScaleReady - All target components are ready.'
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ \n type FooStatus struct{ // Represents the observations of a
+ foo's current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ lastScaleTime:
+ description: LastScaleTime is the last time the NodeCountScaler scaled
+ the number of instances.
+ format: date-time
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index cd05b177030..1a5378fdb52 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -24,6 +24,7 @@ resources:
- bases/apps.kubeblocks.io_opsdefinitions.yaml
- bases/apps.kubeblocks.io_componentversions.yaml
- bases/dataprotection.kubeblocks.io_storageproviders.yaml
+- bases/experimental.kubeblocks.io_nodecountscalers.yaml
#+kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
@@ -52,6 +53,7 @@ patchesStrategicMerge:
#- patches/webhook_in_components.yaml
#- patches/webhook_in_opsdefinitions.yaml
#- patches/webhook_in_componentversions.yaml
+#- patches/webhook_in_nodecountscalers.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
@@ -79,6 +81,7 @@ patchesStrategicMerge:
#- patches/cainjection_in_components.yaml
#- patches/cainjection_in_opsdefinitions.yaml
#- patches/cainjection_in_componentversions.yaml
+#- patches/cainjection_in_nodecountscalers.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
diff --git a/config/crd/patches/cainjection_in_experimental_nodecountscalers.yaml b/config/crd/patches/cainjection_in_experimental_nodecountscalers.yaml
new file mode 100644
index 00000000000..05b04101b88
--- /dev/null
+++ b/config/crd/patches/cainjection_in_experimental_nodecountscalers.yaml
@@ -0,0 +1,7 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ name: nodecountscalers.experimental.kubeblocks.io
diff --git a/config/crd/patches/webhook_in_experimental_nodecountscalers.yaml b/config/crd/patches/webhook_in_experimental_nodecountscalers.yaml
new file mode 100644
index 00000000000..4eed0e885a7
--- /dev/null
+++ b/config/crd/patches/webhook_in_experimental_nodecountscalers.yaml
@@ -0,0 +1,16 @@
+# The following patch enables a conversion webhook for the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: nodecountscalers.experimental.kubeblocks.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
+ conversionReviewVersions:
+ - v1
diff --git a/config/rbac/experimental_nodecountscaler_editor_role.yaml b/config/rbac/experimental_nodecountscaler_editor_role.yaml
new file mode 100644
index 00000000000..13f543ffa30
--- /dev/null
+++ b/config/rbac/experimental_nodecountscaler_editor_role.yaml
@@ -0,0 +1,31 @@
+# permissions for end users to edit nodecountscalers.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/name: clusterrole
+ app.kubernetes.io/instance: nodecountscaler-editor-role
+ app.kubernetes.io/component: rbac
+ app.kubernetes.io/created-by: kubeblocks
+ app.kubernetes.io/part-of: kubeblocks
+ app.kubernetes.io/managed-by: kustomize
+ name: nodecountscaler-editor-role
+rules:
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers/status
+ verbs:
+ - get
diff --git a/config/rbac/experimental_nodecountscaler_viewer_role.yaml b/config/rbac/experimental_nodecountscaler_viewer_role.yaml
new file mode 100644
index 00000000000..742e37d3337
--- /dev/null
+++ b/config/rbac/experimental_nodecountscaler_viewer_role.yaml
@@ -0,0 +1,27 @@
+# permissions for end users to view nodecountscalers.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/name: clusterrole
+ app.kubernetes.io/instance: nodecountscaler-viewer-role
+ app.kubernetes.io/component: rbac
+ app.kubernetes.io/created-by: kubeblocks
+ app.kubernetes.io/part-of: kubeblocks
+ app.kubernetes.io/managed-by: kustomize
+ name: nodecountscaler-viewer-role
+rules:
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers/status
+ verbs:
+ - get
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 2783d52f365..67c97244b19 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -4,6 +4,13 @@ kind: ClusterRole
metadata:
name: manager-role
rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - list
+ - watch
- apiGroups:
- apiextensions.k8s.io
resources:
@@ -780,6 +787,32 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers/status
+ verbs:
+ - get
+ - patch
+ - update
- apiGroups:
- extensions.kubeblocks.io
resources:
diff --git a/config/rbac/workloads_instanceset_editor_role.yaml b/config/rbac/workloads_instanceset_editor_role.yaml
index e0d02903e6c..f6e33e8c5d1 100644
--- a/config/rbac/workloads_instanceset_editor_role.yaml
+++ b/config/rbac/workloads_instanceset_editor_role.yaml
@@ -1,4 +1,4 @@
-# permissions for end users to edit replicatedstatemachines.
+# permissions for end users to edit instancesets.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
diff --git a/config/rbac/workloads_instanceset_viewer_role.yaml b/config/rbac/workloads_instanceset_viewer_role.yaml
index 8fc6e7149f5..d23ab401c4d 100644
--- a/config/rbac/workloads_instanceset_viewer_role.yaml
+++ b/config/rbac/workloads_instanceset_viewer_role.yaml
@@ -1,4 +1,4 @@
-# permissions for end users to view replicatedstatemachines.
+# permissions for end users to view instancesets.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
diff --git a/config/samples/experimental_v1alpha1_nodecountscaler.yaml b/config/samples/experimental_v1alpha1_nodecountscaler.yaml
new file mode 100644
index 00000000000..1e799a39647
--- /dev/null
+++ b/config/samples/experimental_v1alpha1_nodecountscaler.yaml
@@ -0,0 +1,12 @@
+apiVersion: experimental.kubeblocks.io/v1alpha1
+kind: NodeCountScaler
+metadata:
+ labels:
+ app.kubernetes.io/name: nodecountscaler
+ app.kubernetes.io/instance: nodecountscaler-sample
+ app.kubernetes.io/part-of: kubeblocks
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/created-by: kubeblocks
+ name: nodecountscaler-sample
+spec:
+ # TODO(user): Add fields here
diff --git a/controllers/experimental/cluster_handler.go b/controllers/experimental/cluster_handler.go
new file mode 100644
index 00000000000..7b3aeae3224
--- /dev/null
+++ b/controllers/experimental/cluster_handler.go
@@ -0,0 +1,66 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/util/workqueue"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+
+ experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+)
+
+type clusterHandler struct {
+ client.Client
+}
+
+func (h *clusterHandler) Create(ctx context.Context, event event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
+ h.mapAndEnqueue(ctx, limitingInterface, event.Object)
+}
+
+func (h *clusterHandler) Update(ctx context.Context, event event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
+ h.mapAndEnqueue(ctx, limitingInterface, event.ObjectNew)
+}
+
+func (h *clusterHandler) Delete(ctx context.Context, event event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
+}
+
+func (h *clusterHandler) Generic(ctx context.Context, event event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
+}
+
+func (h *clusterHandler) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object client.Object) {
+ scalerList := &experimental.NodeCountScalerList{}
+ if err := h.Client.List(ctx, scalerList); err == nil {
+ for _, item := range scalerList.Items {
+ if item.Spec.TargetClusterName == object.GetName() &&
+ item.Namespace == object.GetNamespace() {
+ q.Add(ctrl.Request{NamespacedName: types.NamespacedName{Namespace: item.Namespace, Name: item.Name}})
+ break
+ }
+ }
+ }
+}
+
+var _ handler.EventHandler = &clusterHandler{}
diff --git a/controllers/experimental/node_scaling_handler.go b/controllers/experimental/node_scaling_handler.go
new file mode 100644
index 00000000000..9e30d49842a
--- /dev/null
+++ b/controllers/experimental/node_scaling_handler.go
@@ -0,0 +1,62 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/util/workqueue"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+
+ experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+)
+
+type nodeScalingHandler struct {
+ client.Client
+}
+
+func (h *nodeScalingHandler) Create(ctx context.Context, event event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
+ h.mapAndEnqueue(ctx, limitingInterface)
+}
+
+func (h *nodeScalingHandler) Update(ctx context.Context, event event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
+}
+
+func (h *nodeScalingHandler) Delete(ctx context.Context, event event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
+ h.mapAndEnqueue(ctx, limitingInterface)
+}
+
+func (h *nodeScalingHandler) Generic(ctx context.Context, event event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) {
+}
+
+func (h *nodeScalingHandler) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface) {
+ scalerList := &experimental.NodeCountScalerList{}
+ if err := h.Client.List(ctx, scalerList); err == nil {
+ for _, item := range scalerList.Items {
+ q.Add(ctrl.Request{NamespacedName: types.NamespacedName{Namespace: item.Namespace, Name: item.Name}})
+ }
+ }
+}
+
+var _ handler.EventHandler = &nodeScalingHandler{}
diff --git a/controllers/experimental/nodecountscaler_controller.go b/controllers/experimental/nodecountscaler_controller.go
new file mode 100644
index 00000000000..00b8db6bd58
--- /dev/null
+++ b/controllers/experimental/nodecountscaler_controller.go
@@ -0,0 +1,79 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ "context"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/record"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+
+ appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
+ experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+)
+
+// NodeCountScalerReconciler reconciles a NodeCountScaler object
+type NodeCountScalerReconciler struct {
+ client.Client
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
+}
+
+//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodecountscalers,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodecountscalers/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodecountscalers/finalizers,verbs=update
+
+// +kubebuilder:rbac:groups=apps.kubeblocks.io,resources=clusters,verbs=get;list;watch;update;patch
+
+// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets,verbs=get;list;watch
+// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets/status,verbs=get
+
+//+kubebuilder:rbac:groups="",resources=nodes,verbs=list;watch
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile
+func (r *NodeCountScalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ logger := log.FromContext(ctx).WithValues("NodeCountScaler", req.NamespacedName)
+
+ err := kubebuilderx.NewController(ctx, r.Client, req, r.Recorder, logger).
+ Prepare(objectTree()).
+ Do(scaleTargetCluster()).
+ Do(updateStatus()).
+ Commit()
+
+ return ctrl.Result{}, err
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *NodeCountScalerReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&experimental.NodeCountScaler{}).
+ Watches(&corev1.Node{}, &nodeScalingHandler{r.Client}).
+ Watches(&appsv1alpha1.Cluster{}, &clusterHandler{r.Client}).
+ Complete(r)
+}
diff --git a/controllers/experimental/reconciler_scale_target_cluster.go b/controllers/experimental/reconciler_scale_target_cluster.go
new file mode 100644
index 00000000000..0b8063dfa5e
--- /dev/null
+++ b/controllers/experimental/reconciler_scale_target_cluster.go
@@ -0,0 +1,85 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ "time"
+
+ "golang.org/x/exp/slices"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
+ experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+ "github.com/apecloud/kubeblocks/pkg/controller/builder"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+ "github.com/apecloud/kubeblocks/pkg/controller/model"
+)
+
+type scaleTargetClusterReconciler struct{}
+
+func (r *scaleTargetClusterReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult {
+ if tree.GetRoot() == nil || model.IsObjectDeleting(tree.GetRoot()) {
+ return kubebuilderx.ResultUnsatisfied
+ }
+ return kubebuilderx.ResultSatisfied
+}
+
+func (r *scaleTargetClusterReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) {
+ scaler, _ := tree.GetRoot().(*experimental.NodeCountScaler)
+ clusterKey := builder.NewClusterBuilder(scaler.Namespace, scaler.Spec.TargetClusterName).GetObject()
+ object, err := tree.Get(clusterKey)
+ if err != nil {
+ return nil, err
+ }
+ cluster, _ := object.(*appsv1alpha1.Cluster)
+ nodes := tree.List(&corev1.Node{})
+ // TODO(free6om): filter nodes that satisfy pod template spec of each component (by nodeSelector, nodeAffinity&nodeAntiAffinity, tolerations)
+ desiredReplicas := int32(len(nodes))
+ scaled := false
+ for i := range cluster.Spec.ComponentSpecs {
+ spec := &cluster.Spec.ComponentSpecs[i]
+ if slices.IndexFunc(scaler.Spec.TargetComponentNames, func(name string) bool {
+ return name == spec.Name
+ }) < 0 {
+ continue
+ }
+ if spec.Replicas != desiredReplicas {
+ spec.Replicas = desiredReplicas
+ scaled = true
+ }
+ }
+ if !scaled {
+ return tree, nil
+ }
+
+ scaler.Status.LastScaleTime = metav1.Time{Time: time.Now()}
+ if err = tree.Update(cluster); err != nil {
+ return nil, err
+ }
+
+ return tree, nil
+}
+
+func scaleTargetCluster() kubebuilderx.Reconciler {
+ return &scaleTargetClusterReconciler{}
+}
+
+var _ kubebuilderx.Reconciler = &scaleTargetClusterReconciler{}
diff --git a/controllers/experimental/reconciler_scale_target_cluster_test.go b/controllers/experimental/reconciler_scale_target_cluster_test.go
new file mode 100644
index 00000000000..0c7e471a08f
--- /dev/null
+++ b/controllers/experimental/reconciler_scale_target_cluster_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
+ experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+ "github.com/apecloud/kubeblocks/pkg/controller/builder"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+)
+
+var _ = Describe("scale target cluster reconciler test", func() {
+ BeforeEach(func() {
+ tree = mockTestTree()
+ })
+
+ Context("PreCondition & Reconcile", func() {
+ It("should work well", func() {
+ By("PreCondition")
+ reconciler := scaleTargetCluster()
+ Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied))
+
+ By("Reconcile")
+ beforeReconcile := metav1.Now()
+ newTree, err := reconciler.Reconcile(tree)
+ Expect(err).Should(BeNil())
+ newNCS, ok := newTree.GetRoot().(*experimentalv1alpha1.NodeCountScaler)
+ Expect(ok).Should(BeTrue())
+ Expect(newNCS.Status.LastScaleTime.Compare(beforeReconcile.Time)).Should(BeNumerically(">=", 0))
+ object, err := newTree.Get(builder.NewClusterBuilder(newNCS.Namespace, newNCS.Spec.TargetClusterName).GetObject())
+ Expect(err).Should(BeNil())
+ newCluster, ok := object.(*appsv1alpha1.Cluster)
+ Expect(ok).Should(BeTrue())
+ nodes := newTree.List(&corev1.Node{})
+ desiredReplicas := int32(len(nodes))
+ Expect(newCluster.Spec.ComponentSpecs).Should(HaveLen(2))
+ Expect(newCluster.Spec.ComponentSpecs[0].Replicas).Should(Equal(desiredReplicas))
+ Expect(newCluster.Spec.ComponentSpecs[1].Replicas).Should(Equal(desiredReplicas))
+ })
+ })
+})
diff --git a/controllers/experimental/reconciler_update_status.go b/controllers/experimental/reconciler_update_status.go
new file mode 100644
index 00000000000..6c5c4c51e68
--- /dev/null
+++ b/controllers/experimental/reconciler_update_status.go
@@ -0,0 +1,132 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ "fmt"
+ "strings"
+
+ "golang.org/x/exp/slices"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
+ "github.com/apecloud/kubeblocks/pkg/constant"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+ "github.com/apecloud/kubeblocks/pkg/controller/model"
+ intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
+)
+
+type updateStatusReconciler struct{}
+
+func (r *updateStatusReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult {
+ if tree.GetRoot() == nil || model.IsObjectDeleting(tree.GetRoot()) {
+ return kubebuilderx.ResultUnsatisfied
+ }
+ return kubebuilderx.ResultSatisfied
+}
+
+func (r *updateStatusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) {
+ scaler, _ := tree.GetRoot().(*experimental.NodeCountScaler)
+ itsList := tree.List(&workloads.InstanceSet{})
+ nodes := tree.List(&corev1.Node{})
+ // TODO(free6om): filter nodes that satisfy pod template spec of each component (by nodeSelector, nodeAffinity&nodeAntiAffinity, tolerations)
+ desiredReplicas := int32(len(nodes))
+ var statusList []experimental.ComponentStatus
+ for _, name := range scaler.Spec.TargetComponentNames {
+ index := slices.IndexFunc(itsList, func(object client.Object) bool {
+ fullName := constant.GenerateClusterComponentName(scaler.Spec.TargetClusterName, name)
+ return fullName == object.GetName()
+ })
+ if index < 0 {
+ continue
+ }
+ its, _ := itsList[index].(*workloads.InstanceSet)
+ status := experimental.ComponentStatus{
+ Name: name,
+ CurrentReplicas: its.Status.CurrentReplicas,
+ ReadyReplicas: its.Status.ReadyReplicas,
+ AvailableReplicas: its.Status.AvailableReplicas,
+ DesiredReplicas: desiredReplicas,
+ }
+ statusList = append(statusList, status)
+ }
+ intctrlutil.MergeList(&statusList, &scaler.Status.ComponentStatuses,
+ func(item experimental.ComponentStatus) func(experimental.ComponentStatus) bool {
+ return func(status experimental.ComponentStatus) bool {
+ return item.Name == status.Name
+ }
+ })
+
+ condition := buildScaleReadyCondition(scaler)
+ meta.SetStatusCondition(&scaler.Status.Conditions, *condition)
+
+ return tree, nil
+}
+
+func buildScaleReadyCondition(scaler *experimental.NodeCountScaler) *metav1.Condition {
+ var (
+ ready = true
+ notReadyNames []string
+ )
+ for _, name := range scaler.Spec.TargetComponentNames {
+ index := slices.IndexFunc(scaler.Status.ComponentStatuses, func(status experimental.ComponentStatus) bool {
+ return status.Name == name
+ })
+ if index < 0 {
+ ready = false
+ notReadyNames = append(notReadyNames, name)
+ continue
+ }
+ status := scaler.Status.ComponentStatuses[index]
+ if status.CurrentReplicas != status.DesiredReplicas ||
+ status.ReadyReplicas != status.DesiredReplicas ||
+ status.AvailableReplicas != status.DesiredReplicas {
+ ready = false
+ notReadyNames = append(notReadyNames, name)
+ }
+ }
+
+ if !ready {
+ return &metav1.Condition{
+ Type: string(experimental.ScaleReady),
+ Status: metav1.ConditionFalse,
+ ObservedGeneration: scaler.Generation,
+ Reason: experimental.ReasonNotReady,
+ Message: fmt.Sprintf("not ready components: %s", strings.Join(notReadyNames, ",")),
+ }
+ }
+ return &metav1.Condition{
+ Type: string(experimental.ScaleReady),
+ Status: metav1.ConditionTrue,
+ ObservedGeneration: scaler.Generation,
+ Reason: experimental.ReasonReady,
+ Message: "scale ready",
+ }
+}
+
+func updateStatus() kubebuilderx.Reconciler {
+ return &updateStatusReconciler{}
+}
+
+var _ kubebuilderx.Reconciler = &updateStatusReconciler{}
diff --git a/controllers/experimental/reconciler_update_status_test.go b/controllers/experimental/reconciler_update_status_test.go
new file mode 100644
index 00000000000..93154daf45e
--- /dev/null
+++ b/controllers/experimental/reconciler_update_status_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+)
+
+var _ = Describe("update status reconciler test", func() {
+ BeforeEach(func() {
+ tree = mockTestTree()
+ })
+
+ Context("PreCondition & Reconcile", func() {
+ It("should work well", func() {
+ var reconciler kubebuilderx.Reconciler
+
+ By("scale target cluster")
+ reconciler = scaleTargetCluster()
+ Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied))
+ newTree, err := reconciler.Reconcile(tree)
+ Expect(err).Should(BeNil())
+
+ By("mock the workload to scale ready")
+ nodes := tree.List(&corev1.Node{})
+ desiredReplicas := int32(len(nodes))
+ itsList := tree.List(&workloads.InstanceSet{})
+ for _, object := range itsList {
+ its, ok := object.(*workloads.InstanceSet)
+ Expect(ok).Should(BeTrue())
+ its.Status.CurrentReplicas = desiredReplicas
+ its.Status.ReadyReplicas = desiredReplicas
+ its.Status.AvailableReplicas = desiredReplicas
+ }
+
+ By("update status")
+ reconciler = updateStatus()
+ Expect(reconciler.PreCondition(newTree)).Should(Equal(kubebuilderx.ResultSatisfied))
+ newTree, err = reconciler.Reconcile(tree)
+ Expect(err).Should(BeNil())
+ newNCS, ok := newTree.GetRoot().(*experimentalv1alpha1.NodeCountScaler)
+ Expect(ok).Should(BeTrue())
+ Expect(newNCS.Status.ComponentStatuses).Should(HaveLen(2))
+ Expect(newNCS.Status.ComponentStatuses[0].CurrentReplicas).Should(Equal(desiredReplicas))
+ Expect(newNCS.Status.ComponentStatuses[0].ReadyReplicas).Should(Equal(desiredReplicas))
+ Expect(newNCS.Status.ComponentStatuses[0].AvailableReplicas).Should(Equal(desiredReplicas))
+ Expect(newNCS.Status.ComponentStatuses[0].DesiredReplicas).Should(Equal(desiredReplicas))
+ Expect(newNCS.Status.ComponentStatuses[1].CurrentReplicas).Should(Equal(desiredReplicas))
+ Expect(newNCS.Status.ComponentStatuses[1].ReadyReplicas).Should(Equal(desiredReplicas))
+ Expect(newNCS.Status.ComponentStatuses[1].AvailableReplicas).Should(Equal(desiredReplicas))
+ Expect(newNCS.Status.ComponentStatuses[1].DesiredReplicas).Should(Equal(desiredReplicas))
+ Expect(newNCS.Status.Conditions).Should(HaveLen(1))
+ Expect(newNCS.Status.Conditions[0].Type).Should(BeEquivalentTo(experimentalv1alpha1.ScaleReady))
+ Expect(newNCS.Status.Conditions[0].Status).Should(Equal(metav1.ConditionTrue))
+ Expect(newNCS.Status.Conditions[0].Reason).Should(Equal(experimentalv1alpha1.ReasonReady))
+ Expect(newNCS.Status.Conditions[0].Message).Should(Equal("scale ready"))
+ })
+ })
+})
diff --git a/controllers/experimental/suite_test.go b/controllers/experimental/suite_test.go
new file mode 100644
index 00000000000..fe706753c9a
--- /dev/null
+++ b/controllers/experimental/suite_test.go
@@ -0,0 +1,113 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+ //+kubebuilder:scaffold:imports
+ appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
+ experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
+ "github.com/apecloud/kubeblocks/pkg/constant"
+ "github.com/apecloud/kubeblocks/pkg/controller/builder"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+ "github.com/apecloud/kubeblocks/pkg/controller/model"
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+const (
+ namespace = "foo"
+ name = "bar"
+)
+
+var (
+ tree *kubebuilderx.ObjectTree
+ ncs *experimentalv1alpha1.NodeCountScaler
+ clusterName = "foo"
+ componentNames = []string{"bar-0", "bar-1"}
+)
+
+func mockTestTree() *kubebuilderx.ObjectTree {
+ ncs = builder.NewNodeCountScalerBuilder(namespace, name).
+ SetTargetClusterName(clusterName).
+ SetTargetComponentNames(componentNames).
+ GetObject()
+
+ specs := []appsv1alpha1.ClusterComponentSpec{
+ {
+ Name: componentNames[0],
+ },
+ {
+ Name: componentNames[1],
+ },
+ }
+ cluster := builder.NewClusterBuilder(namespace, clusterName).SetComponentSpecs(specs).GetObject()
+ its0 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[0])).GetObject()
+ its1 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[1])).GetObject()
+ node0 := &corev1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: "node-0",
+ },
+ }
+ node1 := &corev1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: "node-1",
+ },
+ }
+
+ tree = kubebuilderx.NewObjectTree()
+ tree.SetRoot(ncs)
+ Expect(tree.Add(cluster, its0, its1, node0, node1))
+
+ return tree
+}
+
+func TestAPIs(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ RunSpecs(t, "Controller Suite")
+}
+
+var _ = BeforeSuite(func() {
+ logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+ model.AddScheme(experimentalv1alpha1.AddToScheme)
+ model.AddScheme(appsv1alpha1.AddToScheme)
+ model.AddScheme(workloads.AddToScheme)
+
+ //+kubebuilder:scaffold:scheme
+
+})
+
+var _ = AfterSuite(func() {
+})
diff --git a/controllers/experimental/tree_loader.go b/controllers/experimental/tree_loader.go
new file mode 100644
index 00000000000..1b9cb35e669
--- /dev/null
+++ b/controllers/experimental/tree_loader.go
@@ -0,0 +1,90 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ "context"
+
+ "github.com/go-logr/logr"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
+ experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
+ "github.com/apecloud/kubeblocks/pkg/constant"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+)
+
+type treeLoader struct{}
+
+func (t *treeLoader) Load(ctx context.Context, reader client.Reader, req ctrl.Request, recorder record.EventRecorder, logger logr.Logger) (*kubebuilderx.ObjectTree, error) {
+ tree, err := kubebuilderx.ReadObjectTree[*experimental.NodeCountScaler](ctx, reader, req, nil)
+ if err != nil {
+ return nil, err
+ }
+ root := tree.GetRoot()
+ if root == nil {
+ return tree, nil
+ }
+ scaler, _ := root.(*experimental.NodeCountScaler)
+ key := types.NamespacedName{Namespace: scaler.Namespace, Name: scaler.Spec.TargetClusterName}
+ cluster := &appsv1alpha1.Cluster{}
+ if err = reader.Get(ctx, key, cluster); err != nil {
+ return nil, err
+ }
+ if err = tree.Add(cluster); err != nil {
+ return nil, err
+ }
+ for _, compName := range scaler.Spec.TargetComponentNames {
+ name := constant.GenerateClusterComponentName(scaler.Spec.TargetClusterName, compName)
+ key = types.NamespacedName{Namespace: scaler.Namespace, Name: name}
+ its := &workloads.InstanceSet{}
+ if err = reader.Get(ctx, key, its); err != nil {
+ return nil, err
+ }
+ if err = tree.Add(its); err != nil {
+ return nil, err
+ }
+ }
+ nodeList := &corev1.NodeList{}
+ if err = reader.List(ctx, nodeList); err != nil {
+ return nil, err
+ }
+ for i := range nodeList.Items {
+ if err = tree.Add(&nodeList.Items[i]); err != nil {
+ return nil, err
+ }
+ }
+
+ tree.EventRecorder = recorder
+ tree.Logger = logger
+
+ return tree, nil
+}
+
+func objectTree() kubebuilderx.TreeLoader {
+ return &treeLoader{}
+}
+
+var _ kubebuilderx.TreeLoader = &treeLoader{}
diff --git a/controllers/experimental/tree_loader_test.go b/controllers/experimental/tree_loader_test.go
new file mode 100644
index 00000000000..7a6f6eb9185
--- /dev/null
+++ b/controllers/experimental/tree_loader_test.go
@@ -0,0 +1,113 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package experimental
+
+import (
+ "context"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/golang/mock/gomock"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+
+ appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
+ experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
+ "github.com/apecloud/kubeblocks/pkg/constant"
+ "github.com/apecloud/kubeblocks/pkg/controller/builder"
+ testutil "github.com/apecloud/kubeblocks/pkg/testutil/k8s"
+)
+
+var _ = Describe("tree loader test", func() {
+ Context("Read", func() {
+ It("should work well", func() {
+ ctx := context.Background()
+ logger := logf.FromContext(ctx).WithValues("tree-loader-test", "foo")
+ controller, k8sMock := testutil.SetupK8sMock()
+ defer controller.Finish()
+
+ clusterName := "foo"
+ componentNames := []string{"bar-0", "bar-1"}
+ root := builder.NewNodeCountScalerBuilder(namespace, name).SetTargetClusterName(clusterName).SetTargetComponentNames(componentNames).GetObject()
+ cluster := builder.NewClusterBuilder(namespace, clusterName).GetObject()
+ its0 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[0])).GetObject()
+ its1 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[1])).GetObject()
+ node0 := &corev1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: "node-0",
+ },
+ }
+ node1 := &corev1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: "node-1",
+ },
+ }
+
+ k8sMock.EXPECT().
+ Get(gomock.Any(), gomock.Any(), &experimental.NodeCountScaler{}, gomock.Any()).
+ DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *experimental.NodeCountScaler, _ ...client.GetOption) error {
+ *obj = *root
+ return nil
+ }).Times(1)
+ k8sMock.EXPECT().
+ Get(gomock.Any(), gomock.Any(), &appsv1alpha1.Cluster{}, gomock.Any()).
+ DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *appsv1alpha1.Cluster, _ ...client.GetOption) error {
+ *obj = *cluster
+ return nil
+ }).Times(1)
+ k8sMock.EXPECT().
+ Get(gomock.Any(), gomock.Any(), &workloads.InstanceSet{}, gomock.Any()).
+ DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *workloads.InstanceSet, _ ...client.GetOption) error {
+ if objKey.Name == its0.Name {
+ *obj = *its0
+ } else {
+ *obj = *its1
+ }
+ return nil
+ }).Times(2)
+ k8sMock.EXPECT().
+ List(gomock.Any(), &corev1.NodeList{}, gomock.Any()).
+ DoAndReturn(func(_ context.Context, list *corev1.NodeList, _ ...client.ListOption) error {
+ list.Items = []corev1.Node{*node0, *node1}
+ return nil
+ }).Times(1)
+ req := ctrl.Request{NamespacedName: client.ObjectKeyFromObject(root)}
+ loader := objectTree()
+ tree, err := loader.Load(ctx, k8sMock, req, nil, logger)
+ Expect(err).Should(BeNil())
+ Expect(tree.GetRoot()).ShouldNot(BeNil())
+ Expect(tree.GetRoot()).Should(Equal(root))
+ Expect(tree.GetSecondaryObjects()).Should(HaveLen(5))
+ objectList := []client.Object{cluster, its0, its1, node0, node1}
+ for _, object := range objectList {
+ obj, err := tree.Get(object)
+ Expect(err).Should(BeNil())
+ Expect(obj).Should(Equal(object))
+ }
+ })
+ })
+})
diff --git a/deploy/helm/config/rbac/role.yaml b/deploy/helm/config/rbac/role.yaml
index 2783d52f365..67c97244b19 100644
--- a/deploy/helm/config/rbac/role.yaml
+++ b/deploy/helm/config/rbac/role.yaml
@@ -4,6 +4,13 @@ kind: ClusterRole
metadata:
name: manager-role
rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - list
+ - watch
- apiGroups:
- apiextensions.k8s.io
resources:
@@ -780,6 +787,32 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers/status
+ verbs:
+ - get
+ - patch
+ - update
- apiGroups:
- extensions.kubeblocks.io
resources:
diff --git a/deploy/helm/crds/experimental.kubeblocks.io_nodecountscalers.yaml b/deploy/helm/crds/experimental.kubeblocks.io_nodecountscalers.yaml
new file mode 100644
index 00000000000..73eba8e34ac
--- /dev/null
+++ b/deploy/helm/crds/experimental.kubeblocks.io_nodecountscalers.yaml
@@ -0,0 +1,199 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.12.1
+ labels:
+ app.kubernetes.io/name: kubeblocks
+ name: nodecountscalers.experimental.kubeblocks.io
+spec:
+ group: experimental.kubeblocks.io
+ names:
+ categories:
+ - kubeblocks
+ - all
+ kind: NodeCountScaler
+ listKind: NodeCountScalerList
+ plural: nodecountscalers
+ shortNames:
+ - ncs
+ singular: nodecountscaler
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: target cluster name.
+ jsonPath: .spec.targetClusterName
+ name: TARGET-CLUSTER-NAME
+ type: string
+ - description: scale ready.
+ jsonPath: .status.conditions[?(@.type=="ScaleReady")].status
+ name: READY
+ type: string
+ - description: reason.
+ jsonPath: .status.conditions[?(@.type=="ScaleReady")].reason
+ name: REASON
+ type: string
+ - description: message.
+ jsonPath: .status.conditions[?(@.type=="ScaleReady")].message
+ name: MESSAGE
+ type: string
+ - jsonPath: .status.lastScaleTime
+ name: LAST-SCALE-TIME
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: NodeCountScaler is the Schema for the nodecountscalers API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NodeCountScalerSpec defines the desired state of NodeCountScaler
+ properties:
+ targetClusterName:
+ description: Specified the target Cluster name this scaler applies
+ to.
+ type: string
+ targetComponentNames:
+ description: Specified the target Component names this scaler applies
+ to. All Components will be applied if not set.
+ items:
+ type: string
+ type: array
+ required:
+ - targetClusterName
+ type: object
+ status:
+ description: NodeCountScalerStatus defines the observed state of NodeCountScaler
+ properties:
+ componentStatuses:
+ description: Records the current status information of all Components
+ specified in the NodeCountScalerSpec.
+ items:
+ properties:
+ availableReplicas:
+ description: The number of instances of this component with
+ a Ready condition for at least MinReadySeconds defined in
+ the instance template.
+ format: int32
+ type: integer
+ currentReplicas:
+ description: The current number of instances of this component.
+ format: int32
+ type: integer
+ desiredReplicas:
+ description: The desired number of instances of this component.
+ Usually, it should be the number of nodes.
+ format: int32
+ type: integer
+ name:
+ description: Specified the Component name.
+ type: string
+ readyReplicas:
+ description: The number of instances of this component with
+ a Ready condition.
+ format: int32
+ type: integer
+ required:
+ - availableReplicas
+ - currentReplicas
+ - desiredReplicas
+ - name
+ - readyReplicas
+ type: object
+ type: array
+ conditions:
+ description: 'Represents the latest available observations of a nodecountscaler''s
+ current state. Known .status.conditions.type are: "ScaleReady".
+ ScaleReady - All target components are ready.'
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ \n type FooStatus struct{ // Represents the observations of a
+ foo's current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ lastScaleTime:
+ description: LastScaleTime is the last time the NodeCountScaler scaled
+ the number of instances.
+ format: date-time
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/deploy/helm/templates/deployment.yaml b/deploy/helm/templates/deployment.yaml
index 4c291b5fa15..33c59c9a130 100644
--- a/deploy/helm/templates/deployment.yaml
+++ b/deploy/helm/templates/deployment.yaml
@@ -161,6 +161,10 @@ spec:
- name: ENABLED_RUNTIME_METRICS
value: "true"
{{- end }}
+ {{- if .Values.controllers.experimental.enabled }}
+ - name: experimental
+ value: "true"
+ {{- end }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
diff --git a/deploy/helm/templates/rbac/experimental_nodecountscaler_editor_role.yaml b/deploy/helm/templates/rbac/experimental_nodecountscaler_editor_role.yaml
new file mode 100644
index 00000000000..c5c57d1df4e
--- /dev/null
+++ b/deploy/helm/templates/rbac/experimental_nodecountscaler_editor_role.yaml
@@ -0,0 +1,26 @@
+# permissions for end users to edit nodecountscalers.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ {{- include "kubeblocks.labels" . | nindent 4 }}
+ name: {{ include "kubeblocks.fullname" . }}-nodecountscaler-editor-role
+rules:
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers/status
+ verbs:
+ - get
diff --git a/deploy/helm/templates/rbac/experimental_nodecountscaler_viewer_role.yaml b/deploy/helm/templates/rbac/experimental_nodecountscaler_viewer_role.yaml
new file mode 100644
index 00000000000..a129b2b0154
--- /dev/null
+++ b/deploy/helm/templates/rbac/experimental_nodecountscaler_viewer_role.yaml
@@ -0,0 +1,22 @@
+# permissions for end users to view nodecountscalers.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ {{- include "kubeblocks.labels" . | nindent 4 }}
+ name: {{ include "kubeblocks.fullname" . }}-nodecountscaler-viewer-role
+rules:
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - experimental.kubeblocks.io
+ resources:
+ - nodecountscalers/status
+ verbs:
+ - get
diff --git a/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml b/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml
index de1b60c8b9f..20dc6a0bf69 100644
--- a/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml
+++ b/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml
@@ -1,4 +1,4 @@
-# permissions for end users to edit replicatedstatemachines.
+# permissions for end users to edit instancesets.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
diff --git a/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml b/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml
index 34fd0faed78..416eeba2a2f 100644
--- a/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml
+++ b/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml
@@ -1,4 +1,4 @@
-# permissions for end users to view replicatedstatemachines.
+# permissions for end users to view instancesets.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
diff --git a/deploy/helm/values.yaml b/deploy/helm/values.yaml
index 06fcdb6ebbc..60f5aa6d1bd 100644
--- a/deploy/helm/values.yaml
+++ b/deploy/helm/values.yaml
@@ -1915,3 +1915,7 @@ hostPorts:
- "10259"
- "2379-2380"
- "30000-32767"
+
+controllers:
+ experimental:
+ enabled: false
diff --git a/pkg/controller/builder/builder_cluster.go b/pkg/controller/builder/builder_cluster.go
new file mode 100644
index 00000000000..c73597dc80b
--- /dev/null
+++ b/pkg/controller/builder/builder_cluster.go
@@ -0,0 +1,39 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package builder
+
+import (
+ appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
+)
+
+type ClusterBuilder struct {
+ BaseBuilder[appsv1alpha1.Cluster, *appsv1alpha1.Cluster, ClusterBuilder]
+}
+
+func NewClusterBuilder(namespace, name string) *ClusterBuilder {
+ builder := &ClusterBuilder{}
+ builder.init(namespace, name, &appsv1alpha1.Cluster{}, builder)
+ return builder
+}
+
+func (builder *ClusterBuilder) SetComponentSpecs(specs []appsv1alpha1.ClusterComponentSpec) *ClusterBuilder {
+ builder.get().Spec.ComponentSpecs = specs
+ return builder
+}
diff --git a/pkg/controller/builder/builder_cluster_test.go b/pkg/controller/builder/builder_cluster_test.go
new file mode 100644
index 00000000000..26f86b944b0
--- /dev/null
+++ b/pkg/controller/builder/builder_cluster_test.go
@@ -0,0 +1,50 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package builder
+
+import (
+ appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("cluster builder", func() {
+ It("should work well", func() {
+ const (
+ name = "foo"
+ ns = "default"
+ )
+ specs := []appsv1alpha1.ClusterComponentSpec{
+ {
+ Name: "foo-0",
+ },
+ {
+ Name: "foo-1",
+ },
+ }
+ cluster := NewClusterBuilder(ns, name).
+ SetComponentSpecs(specs).
+ GetObject()
+
+ Expect(cluster.Name).Should(Equal(name))
+ Expect(cluster.Namespace).Should(Equal(ns))
+ Expect(cluster.Spec.ComponentSpecs).Should(Equal(specs))
+ })
+})
diff --git a/pkg/controller/builder/builder_instance_set_test.go b/pkg/controller/builder/builder_instance_set_test.go
index a22052a1648..2b26881e61e 100644
--- a/pkg/controller/builder/builder_instance_set_test.go
+++ b/pkg/controller/builder/builder_instance_set_test.go
@@ -32,7 +32,7 @@ import (
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
)
-var _ = Describe("replicated_state_machine builder", func() {
+var _ = Describe("instance_set builder", func() {
It("should work well", func() {
const (
name = "foo"
diff --git a/pkg/controller/builder/builder_node_count_scaler.go b/pkg/controller/builder/builder_node_count_scaler.go
new file mode 100644
index 00000000000..b5b2e208113
--- /dev/null
+++ b/pkg/controller/builder/builder_node_count_scaler.go
@@ -0,0 +1,44 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package builder
+
+import (
+ experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1"
+)
+
+type NodeCountScalerBuilder struct {
+ BaseBuilder[experimental.NodeCountScaler, *experimental.NodeCountScaler, NodeCountScalerBuilder]
+}
+
+func NewNodeCountScalerBuilder(namespace, name string) *NodeCountScalerBuilder {
+ builder := &NodeCountScalerBuilder{}
+ builder.init(namespace, name, &experimental.NodeCountScaler{}, builder)
+ return builder
+}
+
+func (builder *NodeCountScalerBuilder) SetTargetClusterName(clusterName string) *NodeCountScalerBuilder {
+ builder.get().Spec.TargetClusterName = clusterName
+ return builder
+}
+
+func (builder *NodeCountScalerBuilder) SetTargetComponentNames(componentNames []string) *NodeCountScalerBuilder {
+ builder.get().Spec.TargetComponentNames = componentNames
+ return builder
+}
diff --git a/pkg/controller/builder/builder_node_count_scaler_test.go b/pkg/controller/builder/builder_node_count_scaler_test.go
new file mode 100644
index 00000000000..45bbc2d48d6
--- /dev/null
+++ b/pkg/controller/builder/builder_node_count_scaler_test.go
@@ -0,0 +1,46 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package builder
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("node_count_scaler builder", func() {
+ It("should work well", func() {
+ const (
+ name = "foo"
+ ns = "default"
+ )
+ clusterName := "target-cluster-name"
+ componentNames := []string{"comp-1", "comp-2"}
+
+ ncs := NewNodeCountScalerBuilder(ns, name).
+ SetTargetClusterName(clusterName).
+ SetTargetComponentNames(componentNames).
+ GetObject()
+
+ Expect(ncs.Name).Should(Equal(name))
+ Expect(ncs.Namespace).Should(Equal(ns))
+ Expect(ncs.Spec.TargetClusterName).Should(Equal(clusterName))
+ Expect(ncs.Spec.TargetComponentNames).Should(Equal(componentNames))
+ })
+})
diff --git a/pkg/controller/instanceset/in_place_update_util.go b/pkg/controller/instanceset/in_place_update_util.go
index 0469f4fee3c..868c9a7c156 100644
--- a/pkg/controller/instanceset/in_place_update_util.go
+++ b/pkg/controller/instanceset/in_place_update_util.go
@@ -33,6 +33,7 @@ import (
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
"github.com/apecloud/kubeblocks/pkg/constant"
+ intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
"github.com/apecloud/kubeblocks/pkg/dataprotection/utils"
viper "github.com/apecloud/kubeblocks/pkg/viperx"
)
@@ -131,7 +132,7 @@ func mergeInPlaceFields(src, dst *corev1.Pod) {
dst.Spec.ActiveDeadlineSeconds = src.Spec.ActiveDeadlineSeconds
// according to the Pod API spec, tolerations can only be appended.
// means old tolerations must be in new toleration list.
- mergeList(&src.Spec.Tolerations, &dst.Spec.Tolerations, func(item corev1.Toleration) func(corev1.Toleration) bool {
+ intctrlutil.MergeList(&src.Spec.Tolerations, &dst.Spec.Tolerations, func(item corev1.Toleration) func(corev1.Toleration) bool {
return func(t corev1.Toleration) bool {
return reflect.DeepEqual(item, t)
}
diff --git a/pkg/controller/instanceset/instance_util.go b/pkg/controller/instanceset/instance_util.go
index 92f7ecb108b..def29a6691c 100644
--- a/pkg/controller/instanceset/instance_util.go
+++ b/pkg/controller/instanceset/instance_util.go
@@ -43,6 +43,7 @@ import (
"github.com/apecloud/kubeblocks/pkg/controller/builder"
"github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
"github.com/apecloud/kubeblocks/pkg/controller/model"
+ intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
)
type InstanceTemplate interface {
@@ -322,7 +323,7 @@ func buildInstanceByTemplate(name string, template *instanceTemplateExt, parent
}).GetObject()
volumeList = append(volumeList, *volume)
}
- mergeList(&volumeList, &pod.Spec.Volumes, func(item corev1.Volume) func(corev1.Volume) bool {
+ intctrlutil.MergeList(&volumeList, &pod.Spec.Volumes, func(item corev1.Volume) func(corev1.Volume) bool {
return func(v corev1.Volume) bool {
return v.Name == item.Name
}
@@ -602,7 +603,7 @@ func buildInstanceTemplateExt(template workloads.InstanceTemplate, templateExt *
mergeCPUNMemory(&src.Requests, &dst.Requests)
}
if template.Env != nil {
- mergeList(&template.Env, &templateExt.Spec.Containers[0].Env,
+ intctrlutil.MergeList(&template.Env, &templateExt.Spec.Containers[0].Env,
func(item corev1.EnvVar) func(corev1.EnvVar) bool {
return func(env corev1.EnvVar) bool {
return env.Name == item.Name
@@ -610,25 +611,25 @@ func buildInstanceTemplateExt(template workloads.InstanceTemplate, templateExt *
})
}
}
- mergeList(&template.Tolerations, &templateExt.Spec.Tolerations,
+ intctrlutil.MergeList(&template.Tolerations, &templateExt.Spec.Tolerations,
func(item corev1.Toleration) func(corev1.Toleration) bool {
return func(t corev1.Toleration) bool {
return reflect.DeepEqual(item, t)
}
})
- mergeList(&template.Volumes, &templateExt.Spec.Volumes,
+ intctrlutil.MergeList(&template.Volumes, &templateExt.Spec.Volumes,
func(item corev1.Volume) func(corev1.Volume) bool {
return func(v corev1.Volume) bool {
return v.Name == item.Name
}
})
- mergeList(&template.VolumeMounts, &templateExt.Spec.Containers[0].VolumeMounts,
+ intctrlutil.MergeList(&template.VolumeMounts, &templateExt.Spec.Containers[0].VolumeMounts,
func(item corev1.VolumeMount) func(corev1.VolumeMount) bool {
return func(vm corev1.VolumeMount) bool {
return vm.Name == item.Name
}
})
- mergeList(&template.VolumeClaimTemplates, &templateExt.VolumeClaimTemplates,
+ intctrlutil.MergeList(&template.VolumeClaimTemplates, &templateExt.VolumeClaimTemplates,
func(item corev1.PersistentVolumeClaim) func(corev1.PersistentVolumeClaim) bool {
return func(claim corev1.PersistentVolumeClaim) bool {
return claim.Name == item.Name
diff --git a/pkg/controller/instanceset/utils.go b/pkg/controller/instanceset/utils.go
index cef83cf416f..0a45f2f60b4 100644
--- a/pkg/controller/instanceset/utils.go
+++ b/pkg/controller/instanceset/utils.go
@@ -23,7 +23,6 @@ import (
"fmt"
"strings"
- "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
@@ -197,21 +196,6 @@ func mergeMap[K comparable, V any](src, dst *map[K]V) {
}
}
-func mergeList[E any](src, dst *[]E, f func(E) func(E) bool) {
- if len(*src) == 0 {
- return
- }
- for i := range *src {
- item := (*src)[i]
- index := slices.IndexFunc(*dst, f(item))
- if index >= 0 {
- (*dst)[index] = item
- } else {
- *dst = append(*dst, item)
- }
- }
-}
-
func getMatchLabels(name string) map[string]string {
return map[string]string{
WorkloadsManagedByLabelKey: workloads.Kind,
diff --git a/pkg/controller/instanceset/utils_test.go b/pkg/controller/instanceset/utils_test.go
index 7d0c237edf1..e3e6acacfda 100644
--- a/pkg/controller/instanceset/utils_test.go
+++ b/pkg/controller/instanceset/utils_test.go
@@ -25,7 +25,6 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
"github.com/apecloud/kubeblocks/pkg/controller/builder"
@@ -40,62 +39,6 @@ var _ = Describe("utils test", func() {
priorityMap = ComposeRolePriorityMap(its.Spec.Roles)
})
- Context("mergeList", func() {
- It("should work well", func() {
- src := []corev1.Volume{
- {
- Name: "pvc1",
- VolumeSource: corev1.VolumeSource{
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: "pvc1-pod-0",
- },
- },
- },
- {
- Name: "pvc2",
- VolumeSource: corev1.VolumeSource{
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: "pvc2-pod-0",
- },
- },
- },
- }
- dst := []corev1.Volume{
- {
- Name: "pvc0",
- VolumeSource: corev1.VolumeSource{
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: "pvc0-pod-0",
- },
- },
- },
- {
- Name: "pvc1",
- VolumeSource: corev1.VolumeSource{
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: "pvc-pod-0",
- },
- },
- },
- }
- mergeList(&src, &dst, func(v corev1.Volume) func(corev1.Volume) bool {
- return func(volume corev1.Volume) bool {
- return v.Name == volume.Name
- }
- })
-
- Expect(dst).Should(HaveLen(3))
- slices.SortStableFunc(dst, func(a, b corev1.Volume) bool {
- return a.Name < b.Name
- })
- Expect(dst[0].Name).Should(Equal("pvc0"))
- Expect(dst[1].Name).Should(Equal("pvc1"))
- Expect(dst[1].PersistentVolumeClaim).ShouldNot(BeNil())
- Expect(dst[1].PersistentVolumeClaim.ClaimName).Should(Equal("pvc1-pod-0"))
- Expect(dst[2].Name).Should(Equal("pvc2"))
- })
- })
-
Context("mergeMap", func() {
It("should work well", func() {
src := map[string]string{
diff --git a/pkg/controllerutil/util.go b/pkg/controllerutil/util.go
index d56c81bef08..670fefd63c2 100644
--- a/pkg/controllerutil/util.go
+++ b/pkg/controllerutil/util.go
@@ -23,6 +23,7 @@ import (
"context"
"reflect"
+ "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -183,3 +184,18 @@ func DeleteOwnedResources[T generics.Object, PT generics.PObject[T], L generics.
}
return nil
}
+
+func MergeList[E any](src, dst *[]E, f func(E) func(E) bool) {
+ if len(*src) == 0 {
+ return
+ }
+ for i := range *src {
+ item := (*src)[i]
+ index := slices.IndexFunc(*dst, f(item))
+ if index >= 0 {
+ (*dst)[index] = item
+ } else {
+ *dst = append(*dst, item)
+ }
+ }
+}
diff --git a/pkg/controllerutil/util_test.go b/pkg/controllerutil/util_test.go
index 946dcfa434b..40093458320 100644
--- a/pkg/controllerutil/util_test.go
+++ b/pkg/controllerutil/util_test.go
@@ -23,9 +23,72 @@ import (
"context"
"testing"
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "golang.org/x/exp/slices"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
)
+var _ = Describe("utils test", func() {
+ Context("MergeList", func() {
+ It("should work well", func() {
+ src := []corev1.Volume{
+ {
+ Name: "pvc1",
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "pvc1-pod-0",
+ },
+ },
+ },
+ {
+ Name: "pvc2",
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "pvc2-pod-0",
+ },
+ },
+ },
+ }
+ dst := []corev1.Volume{
+ {
+ Name: "pvc0",
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "pvc0-pod-0",
+ },
+ },
+ },
+ {
+ Name: "pvc1",
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "pvc-pod-0",
+ },
+ },
+ },
+ }
+ MergeList(&src, &dst, func(v corev1.Volume) func(corev1.Volume) bool {
+ return func(volume corev1.Volume) bool {
+ return v.Name == volume.Name
+ }
+ })
+
+ Expect(dst).Should(HaveLen(3))
+ slices.SortStableFunc(dst, func(a, b corev1.Volume) bool {
+ return a.Name < b.Name
+ })
+ Expect(dst[0].Name).Should(Equal("pvc0"))
+ Expect(dst[1].Name).Should(Equal("pvc1"))
+ Expect(dst[1].PersistentVolumeClaim).ShouldNot(BeNil())
+ Expect(dst[1].PersistentVolumeClaim.ClaimName).Should(Equal("pvc1-pod-0"))
+ Expect(dst[2].Name).Should(Equal("pvc2"))
+ })
+ })
+})
+
func TestGetUncachedObjects(t *testing.T) {
GetUncachedObjects()
}