From 913f9c17727721e9ea05a5cb53de3da60522df28 Mon Sep 17 00:00:00 2001 From: free6om Date: Mon, 6 May 2024 14:20:32 +0800 Subject: [PATCH 01/14] scaffold done --- PROJECT | 9 ++ .../v1alpha1/groupversion_info.go | 39 ++++++ .../v1alpha1/nodeawarescaler_types.go | 67 ++++++++++ .../v1alpha1/zz_generated.deepcopy.go | 118 ++++++++++++++++++ cmd/manager/main.go | 23 +++- config/crd/kustomization.yaml | 3 + ...tion_in_experimental_nodeawarescalers.yaml | 7 ++ ...hook_in_experimental_nodeawarescalers.yaml | 16 +++ ...erimental_nodeawarescaler_editor_role.yaml | 31 +++++ ...erimental_nodeawarescaler_viewer_role.yaml | 27 ++++ ...experimental_v1alpha1_nodeawarescaler.yaml | 12 ++ .../experimental/node_scaling_handler.go | 60 +++++++++ .../nodeawarescaler_controller.go | 68 ++++++++++ controllers/experimental/suite_test.go | 83 ++++++++++++ deploy/helm/templates/deployment.yaml | 4 + deploy/helm/values.yaml | 4 + 16 files changed, 568 insertions(+), 3 deletions(-) create mode 100644 apis/experimental/v1alpha1/groupversion_info.go create mode 100644 apis/experimental/v1alpha1/nodeawarescaler_types.go create mode 100644 apis/experimental/v1alpha1/zz_generated.deepcopy.go create mode 100644 config/crd/patches/cainjection_in_experimental_nodeawarescalers.yaml create mode 100644 config/crd/patches/webhook_in_experimental_nodeawarescalers.yaml create mode 100644 config/rbac/experimental_nodeawarescaler_editor_role.yaml create mode 100644 config/rbac/experimental_nodeawarescaler_viewer_role.yaml create mode 100644 config/samples/experimental_v1alpha1_nodeawarescaler.yaml create mode 100644 controllers/experimental/node_scaling_handler.go create mode 100644 controllers/experimental/nodeawarescaler_controller.go create mode 100644 controllers/experimental/suite_test.go diff --git a/PROJECT b/PROJECT index d89f2c28c43..a5988cade67 100644 --- a/PROJECT +++ b/PROJECT @@ -246,4 +246,13 @@ resources: kind: StorageProvider path: github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1 
version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: kubeblocks.io + group: experimental + kind: NodeAwareScaler + path: github.com/apecloud/kubeblocks/apis/experimental/v1alpha1 + version: v1alpha1 version: "3" diff --git a/apis/experimental/v1alpha1/groupversion_info.go b/apis/experimental/v1alpha1/groupversion_info.go new file mode 100644 index 00000000000..9659fce5377 --- /dev/null +++ b/apis/experimental/v1alpha1/groupversion_info.go @@ -0,0 +1,39 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +// Package v1alpha1 contains API Schema definitions for the experimental v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=experimental.kubeblocks.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "experimental.kubeblocks.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/experimental/v1alpha1/nodeawarescaler_types.go b/apis/experimental/v1alpha1/nodeawarescaler_types.go new file mode 100644 index 00000000000..5c0345b5c9f --- /dev/null +++ b/apis/experimental/v1alpha1/nodeawarescaler_types.go @@ -0,0 +1,67 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NodeAwareScalerSpec defines the desired state of NodeAwareScaler +type NodeAwareScalerSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NodeAwareScaler. 
Edit nodeawarescaler_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NodeAwareScalerStatus defines the observed state of NodeAwareScaler +type NodeAwareScalerStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// NodeAwareScaler is the Schema for the nodeawarescalers API +type NodeAwareScaler struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NodeAwareScalerSpec `json:"spec,omitempty"` + Status NodeAwareScalerStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// NodeAwareScalerList contains a list of NodeAwareScaler +type NodeAwareScalerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NodeAwareScaler `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NodeAwareScaler{}, &NodeAwareScalerList{}) +} diff --git a/apis/experimental/v1alpha1/zz_generated.deepcopy.go b/apis/experimental/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..0c3620407f1 --- /dev/null +++ b/apis/experimental/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,118 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAwareScaler) DeepCopyInto(out *NodeAwareScaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScaler. +func (in *NodeAwareScaler) DeepCopy() *NodeAwareScaler { + if in == nil { + return nil + } + out := new(NodeAwareScaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeAwareScaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAwareScalerList) DeepCopyInto(out *NodeAwareScalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeAwareScaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScalerList. +func (in *NodeAwareScalerList) DeepCopy() *NodeAwareScalerList { + if in == nil { + return nil + } + out := new(NodeAwareScalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NodeAwareScalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAwareScalerSpec) DeepCopyInto(out *NodeAwareScalerSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScalerSpec. +func (in *NodeAwareScalerSpec) DeepCopy() *NodeAwareScalerSpec { + if in == nil { + return nil + } + out := new(NodeAwareScalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAwareScalerStatus) DeepCopyInto(out *NodeAwareScalerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScalerStatus. +func (in *NodeAwareScalerStatus) DeepCopy() *NodeAwareScalerStatus { + if in == nil { + return nil + } + out := new(NodeAwareScalerStatus) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/manager/main.go b/cmd/manager/main.go index d374ddbaf8b..a1910572117 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -47,6 +47,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" + experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + experimentalcontrollers "github.com/apecloud/kubeblocks/controllers/experimental" + // +kubebuilder:scaffold:imports appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" @@ -81,9 +84,10 @@ const ( leaderElectIDFlagKey flagName = "leader-elect-id" // switch flags key for API groups - appsFlagKey flagName = "apps" - extensionsFlagKey flagName = "extensions" - workloadsFlagKey flagName = "workloads" + appsFlagKey flagName = "apps" + extensionsFlagKey flagName = "extensions" + workloadsFlagKey flagName 
= "workloads" + experimentalFlagKey flagName = "experimental" multiClusterKubeConfigFlagKey flagName = "multi-cluster-kubeconfig" multiClusterContextsFlagKey flagName = "multi-cluster-contexts" @@ -108,6 +112,7 @@ func init() { utilruntime.Must(appsv1beta1.AddToScheme(scheme)) utilruntime.Must(legacy.AddToScheme(scheme)) utilruntime.Must(apiextv1.AddToScheme(scheme)) + utilruntime.Must(experimentalv1alpha1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme viper.SetConfigName("config") // name of config file (without extension) @@ -168,6 +173,8 @@ func setupFlags() { "Enable the extensions controller manager.") flag.Bool(workloadsFlagKey.String(), true, "Enable the workloads controller manager.") + flag.Bool(experimentalFlagKey.String(), false, + "Enable the experimental controller manager.") flag.String(multiClusterKubeConfigFlagKey.String(), "", "Paths to the kubeconfig for multi-cluster accessing.") flag.String(multiClusterContextsFlagKey.String(), "", "Kube contexts the manager will talk to.") @@ -510,6 +517,16 @@ func main() { os.Exit(1) } } + + if viper.GetBool(experimentalFlagKey.viperName()) { + if err = (&experimentalcontrollers.NodeAwareScalerReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "NodeAwareScaler") + os.Exit(1) + } + } // +kubebuilder:scaffold:builder if viper.GetBool("enable_webhooks") { diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index cd05b177030..b10d341efd9 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -24,6 +24,7 @@ resources: - bases/apps.kubeblocks.io_opsdefinitions.yaml - bases/apps.kubeblocks.io_componentversions.yaml - bases/dataprotection.kubeblocks.io_storageproviders.yaml +- bases/experimental.kubeblocks.io_nodeawarescalers.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -52,6 +53,7 @@ patchesStrategicMerge: #- 
patches/webhook_in_components.yaml #- patches/webhook_in_opsdefinitions.yaml #- patches/webhook_in_componentversions.yaml +#- patches/webhook_in_nodeawarescalers.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. @@ -79,6 +81,7 @@ patchesStrategicMerge: #- patches/cainjection_in_components.yaml #- patches/cainjection_in_opsdefinitions.yaml #- patches/cainjection_in_componentversions.yaml +#- patches/cainjection_in_nodeawarescalers.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_experimental_nodeawarescalers.yaml b/config/crd/patches/cainjection_in_experimental_nodeawarescalers.yaml new file mode 100644 index 00000000000..82bccd6f467 --- /dev/null +++ b/config/crd/patches/cainjection_in_experimental_nodeawarescalers.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: nodeawarescalers.experimental.kubeblocks.io diff --git a/config/crd/patches/webhook_in_experimental_nodeawarescalers.yaml b/config/crd/patches/webhook_in_experimental_nodeawarescalers.yaml new file mode 100644 index 00000000000..20fb5af220f --- /dev/null +++ b/config/crd/patches/webhook_in_experimental_nodeawarescalers.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: nodeawarescalers.experimental.kubeblocks.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git 
a/config/rbac/experimental_nodeawarescaler_editor_role.yaml b/config/rbac/experimental_nodeawarescaler_editor_role.yaml new file mode 100644 index 00000000000..1153ec84285 --- /dev/null +++ b/config/rbac/experimental_nodeawarescaler_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit nodeawarescalers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: nodeawarescaler-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeblocks + app.kubernetes.io/part-of: kubeblocks + app.kubernetes.io/managed-by: kustomize + name: nodeawarescaler-editor-role +rules: +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers/status + verbs: + - get diff --git a/config/rbac/experimental_nodeawarescaler_viewer_role.yaml b/config/rbac/experimental_nodeawarescaler_viewer_role.yaml new file mode 100644 index 00000000000..a1fba19a052 --- /dev/null +++ b/config/rbac/experimental_nodeawarescaler_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view nodeawarescalers. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: nodeawarescaler-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeblocks + app.kubernetes.io/part-of: kubeblocks + app.kubernetes.io/managed-by: kustomize + name: nodeawarescaler-viewer-role +rules: +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers + verbs: + - get + - list + - watch +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers/status + verbs: + - get diff --git a/config/samples/experimental_v1alpha1_nodeawarescaler.yaml b/config/samples/experimental_v1alpha1_nodeawarescaler.yaml new file mode 100644 index 00000000000..eab21dd30f7 --- /dev/null +++ b/config/samples/experimental_v1alpha1_nodeawarescaler.yaml @@ -0,0 +1,12 @@ +apiVersion: experimental.kubeblocks.io/v1alpha1 +kind: NodeAwareScaler +metadata: + labels: + app.kubernetes.io/name: nodeawarescaler + app.kubernetes.io/instance: nodeawarescaler-sample + app.kubernetes.io/part-of: kubeblocks + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeblocks + name: nodeawarescaler-sample +spec: + # TODO(user): Add fields here diff --git a/controllers/experimental/node_scaling_handler.go b/controllers/experimental/node_scaling_handler.go new file mode 100644 index 00000000000..a75b80eba92 --- /dev/null +++ b/controllers/experimental/node_scaling_handler.go @@ -0,0 +1,60 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package experimental + +import ( + "context" + + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + + experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" +) + +type nodeScalingHandler struct { + client.Client +} + +func (h *nodeScalingHandler) Create(ctx context.Context, event event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) { + h.mapAndEnqueue(ctx, limitingInterface) +} + +func (h *nodeScalingHandler) Update(ctx context.Context, event event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) { +} + +func (h *nodeScalingHandler) Delete(ctx context.Context, event event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) { + h.mapAndEnqueue(ctx, limitingInterface) +} + +func (h *nodeScalingHandler) Generic(ctx context.Context, event event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) { +} + +func (h *nodeScalingHandler) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface) { + scalerList := &experimental.NodeAwareScalerList{} + if err := h.Client.List(ctx, scalerList); err == nil { + for _, item := range scalerList.Items { + q.Add(client.ObjectKeyFromObject(&item)) + } + } +} + +var _ handler.EventHandler = &nodeScalingHandler{} diff --git a/controllers/experimental/nodeawarescaler_controller.go b/controllers/experimental/nodeawarescaler_controller.go new file mode 100644 index 00000000000..2ffbbf8a0b4 --- /dev/null +++ b/controllers/experimental/nodeawarescaler_controller.go @@ -0,0 
+1,68 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package experimental + +import ( + "context" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" +) + +// NodeAwareScalerReconciler reconciles a NodeAwareScaler object +type NodeAwareScalerReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodeawarescalers,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodeawarescalers/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodeawarescalers/finalizers,verbs=update + +//+kubebuilder:rbac:groups="",resources=nodes,verbs=list;watch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the NodeAwareScaler object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile +func (r *NodeAwareScalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + + // TODO(user): your logic here + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NodeAwareScalerReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&experimentalv1alpha1.NodeAwareScaler{}). + Watches(&corev1.Node{}, &nodeScalingHandler{r.Client}). + Complete(r) +} diff --git a/controllers/experimental/suite_test.go b/controllers/experimental/suite_test.go new file mode 100644 index 00000000000..2a704ce1fac --- /dev/null +++ b/controllers/experimental/suite_test.go @@ -0,0 +1,83 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package experimental + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = experimentalv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/deploy/helm/templates/deployment.yaml b/deploy/helm/templates/deployment.yaml index 4c291b5fa15..33c59c9a130 100644 --- a/deploy/helm/templates/deployment.yaml +++ b/deploy/helm/templates/deployment.yaml @@ -161,6 +161,10 @@ spec: - name: ENABLED_RUNTIME_METRICS value: "true" {{- end }} + {{- if .Values.controllers.experimental.enabled }} + - name: experimental + value: "true" + {{- end }} {{- with .Values.securityContext }} securityContext: {{- toYaml . | nindent 12 }} diff --git a/deploy/helm/values.yaml b/deploy/helm/values.yaml index 06fcdb6ebbc..60f5aa6d1bd 100644 --- a/deploy/helm/values.yaml +++ b/deploy/helm/values.yaml @@ -1915,3 +1915,7 @@ hostPorts: - "10259" - "2379-2380" - "30000-32767" + +controllers: + experimental: + enabled: false From c105108cf7b3f6519435c3b84a3fe41b752f2066 Mon Sep 17 00:00:00 2001 From: free6om Date: Mon, 6 May 2024 17:20:54 +0800 Subject: [PATCH 02/14] API done --- .../v1alpha1/nodeawarescaler_types.go | 53 +++++- ...mental.kubeblocks.io_nodeawarescalers.yaml | 174 ++++++++++++++++++ config/rbac/role.yaml | 33 ++++ .../workloads_instanceset_editor_role.yaml | 2 +- .../workloads_instanceset_viewer_role.yaml | 2 +- deploy/helm/config/rbac/role.yaml | 33 ++++ ...mental.kubeblocks.io_nodeawarescalers.yaml | 174 ++++++++++++++++++ ...erimental_nodeawarescaler_editor_role.yaml | 26 +++ ...erimental_nodeawarescaler_viewer_role.yaml | 22 +++ .../workloads_instanceset_editor_role.yaml | 2 +- 
.../workloads_instanceset_viewer_role.yaml | 2 +- 11 files changed, 512 insertions(+), 11 deletions(-) create mode 100644 config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml create mode 100644 deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml create mode 100644 deploy/helm/templates/rbac/experimental_nodeawarescaler_editor_role.yaml create mode 100644 deploy/helm/templates/rbac/experimental_nodeawarescaler_viewer_role.yaml diff --git a/apis/experimental/v1alpha1/nodeawarescaler_types.go b/apis/experimental/v1alpha1/nodeawarescaler_types.go index 5c0345b5c9f..045d5c6a25e 100644 --- a/apis/experimental/v1alpha1/nodeawarescaler_types.go +++ b/apis/experimental/v1alpha1/nodeawarescaler_types.go @@ -28,17 +28,56 @@ import ( // NodeAwareScalerSpec defines the desired state of NodeAwareScaler type NodeAwareScalerSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Foo is an example field of NodeAwareScaler. Edit nodeawarescaler_types.go to remove/update - Foo string `json:"foo,omitempty"` + // Specified the target Cluster name this scaler applies to. + TargetClusterName string `json:"targetName"` + + // Specified the target Component names this scaler applies to. + // All Components will be applied if not set. + // + // +optional + TargetComponentNames []string `json:"TargetComponentNames,omitempty"` } // NodeAwareScalerStatus defines the observed state of NodeAwareScaler type NodeAwareScalerStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Records the current status information of all Components specified in the NodeAwareScalerSpec. + // + // +optional + ComponentStatuses []ComponentStatus `json:"componentStatuses,omitempty"` + + // Represents the latest available observations of a nodeawarescaler's current state. 
+ // Known .status.conditions.type are: "ScaleReady". + // ScaleReady - All target components are ready. + // + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // LastScaleTime is the last time the NodeAwareScaler scaled the number of instances. + // + // +optional + LastScaleTime metav1.Time `json:"lastScaleTime,omitempty"` +} + +type ComponentStatus struct { + // Specified the Component name. + Name string `json:"name"` + + // The current number of instances of this component. + CurrentReplicas int32 `json:"currentReplicas"` + + // The number of instances of this component with a Ready condition. + ReadyReplicas int32 `json:"readyReplicas"` + + // The number of instances of this component with a Ready condition for at least MinReadySeconds defined in the instance template. + AvailableReplicas int32 `json:"availableReplicas"` + + // The desired number of instances of this component. + // Usually, it should be the number of nodes. 
+ DesiredReplicas int32 `json:"desiredReplicas"` } //+kubebuilder:object:root=true diff --git a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml new file mode 100644 index 00000000000..525c830679d --- /dev/null +++ b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -0,0 +1,174 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app.kubernetes.io/name: kubeblocks + name: nodeawarescalers.experimental.kubeblocks.io +spec: + group: experimental.kubeblocks.io + names: + kind: NodeAwareScaler + listKind: NodeAwareScalerList + plural: nodeawarescalers + singular: nodeawarescaler + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NodeAwareScaler is the Schema for the nodeawarescalers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NodeAwareScalerSpec defines the desired state of NodeAwareScaler + properties: + TargetComponentNames: + description: Specified the target Component names this scaler applies + to. All Components will be applied if not set. 
+ items: + type: string + type: array + targetName: + description: Specified the target Cluster name this scaler applies + to. + type: string + required: + - targetName + type: object + status: + description: NodeAwareScalerStatus defines the observed state of NodeAwareScaler + properties: + componentStatuses: + description: Records the current status information of all Components + specified in the NodeAwareScalerSpec. + items: + properties: + availableReplicas: + description: The number of instances of this component with + a Ready condition for at least MinReadySeconds defined in + the instance template. + format: int32 + type: integer + currentReplicas: + description: The current number of instances of this component. + format: int32 + type: integer + desiredReplicas: + description: The desired number of instances of this component. + Usually, it should be the number of nodes. + format: int32 + type: integer + name: + description: Specified the Component name. + type: string + readyReplicas: + description: The number of instances of this component with + a Ready condition. + format: int32 + type: integer + required: + - availableReplicas + - currentReplicas + - desiredReplicas + - name + - readyReplicas + type: object + type: array + conditions: + description: 'Represents the latest available observations of a nodeawarescaler''s + current state. Known .status.conditions.type are: "ScaleReady". + ScaleReady - All target components are ready.' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lastScaleTime: + description: LastScaleTime is the last time the NodeAwareScaler scaled + the number of instances. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 2783d52f365..e3218c245bd 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -4,6 +4,13 @@ kind: ClusterRole metadata: name: manager-role rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch - apiGroups: - apiextensions.k8s.io resources: @@ -780,6 +787,32 @@ rules: - get - patch - update +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers/finalizers + verbs: + - update +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers/status + verbs: + - get + - patch + - update - apiGroups: - extensions.kubeblocks.io resources: diff --git a/config/rbac/workloads_instanceset_editor_role.yaml b/config/rbac/workloads_instanceset_editor_role.yaml index e0d02903e6c..f6e33e8c5d1 100644 --- a/config/rbac/workloads_instanceset_editor_role.yaml +++ b/config/rbac/workloads_instanceset_editor_role.yaml @@ -1,4 +1,4 @@ -# permissions for end users to 
edit replicatedstatemachines. +# permissions for end users to edit instancesets. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/config/rbac/workloads_instanceset_viewer_role.yaml b/config/rbac/workloads_instanceset_viewer_role.yaml index 8fc6e7149f5..d23ab401c4d 100644 --- a/config/rbac/workloads_instanceset_viewer_role.yaml +++ b/config/rbac/workloads_instanceset_viewer_role.yaml @@ -1,4 +1,4 @@ -# permissions for end users to view replicatedstatemachines. +# permissions for end users to view instancesets. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/deploy/helm/config/rbac/role.yaml b/deploy/helm/config/rbac/role.yaml index 2783d52f365..e3218c245bd 100644 --- a/deploy/helm/config/rbac/role.yaml +++ b/deploy/helm/config/rbac/role.yaml @@ -4,6 +4,13 @@ kind: ClusterRole metadata: name: manager-role rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch - apiGroups: - apiextensions.k8s.io resources: @@ -780,6 +787,32 @@ rules: - get - patch - update +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers/finalizers + verbs: + - update +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers/status + verbs: + - get + - patch + - update - apiGroups: - extensions.kubeblocks.io resources: diff --git a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml new file mode 100644 index 00000000000..525c830679d --- /dev/null +++ b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -0,0 +1,174 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + labels: + app.kubernetes.io/name: kubeblocks + 
name: nodeawarescalers.experimental.kubeblocks.io +spec: + group: experimental.kubeblocks.io + names: + kind: NodeAwareScaler + listKind: NodeAwareScalerList + plural: nodeawarescalers + singular: nodeawarescaler + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NodeAwareScaler is the Schema for the nodeawarescalers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NodeAwareScalerSpec defines the desired state of NodeAwareScaler + properties: + TargetComponentNames: + description: Specified the target Component names this scaler applies + to. All Components will be applied if not set. + items: + type: string + type: array + targetName: + description: Specified the target Cluster name this scaler applies + to. + type: string + required: + - targetName + type: object + status: + description: NodeAwareScalerStatus defines the observed state of NodeAwareScaler + properties: + componentStatuses: + description: Records the current status information of all Components + specified in the NodeAwareScalerSpec. + items: + properties: + availableReplicas: + description: The number of instances of this component with + a Ready condition for at least MinReadySeconds defined in + the instance template. 
+ format: int32 + type: integer + currentReplicas: + description: The current number of instances of this component. + format: int32 + type: integer + desiredReplicas: + description: The desired number of instances of this component. + Usually, it should be the number of nodes. + format: int32 + type: integer + name: + description: Specified the Component name. + type: string + readyReplicas: + description: The number of instances of this component with + a Ready condition. + format: int32 + type: integer + required: + - availableReplicas + - currentReplicas + - desiredReplicas + - name + - readyReplicas + type: object + type: array + conditions: + description: 'Represents the latest available observations of a nodeawarescaler''s + current state. Known .status.conditions.type are: "ScaleReady". + ScaleReady - All target components are ready.' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lastScaleTime: + description: LastScaleTime is the last time the NodeAwareScaler scaled + the number of instances. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/helm/templates/rbac/experimental_nodeawarescaler_editor_role.yaml b/deploy/helm/templates/rbac/experimental_nodeawarescaler_editor_role.yaml new file mode 100644 index 00000000000..1466c26f1d2 --- /dev/null +++ b/deploy/helm/templates/rbac/experimental_nodeawarescaler_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions for end users to edit nodeawarescalers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kubeblocks.labels" . | nindent 4 }} + name: {{ include "kubeblocks.fullname" . }}-nodeawarescaler-editor-role +rules: +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers/status + verbs: + - get diff --git a/deploy/helm/templates/rbac/experimental_nodeawarescaler_viewer_role.yaml b/deploy/helm/templates/rbac/experimental_nodeawarescaler_viewer_role.yaml new file mode 100644 index 00000000000..4cc51589760 --- /dev/null +++ b/deploy/helm/templates/rbac/experimental_nodeawarescaler_viewer_role.yaml @@ -0,0 +1,22 @@ +# permissions for end users to view nodeawarescalers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kubeblocks.labels" . | nindent 4 }} + name: {{ include "kubeblocks.fullname" . 
}}-nodeawarescaler-viewer-role +rules: +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers + verbs: + - get + - list + - watch +- apiGroups: + - experimental.kubeblocks.io + resources: + - nodeawarescalers/status + verbs: + - get diff --git a/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml b/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml index de1b60c8b9f..20dc6a0bf69 100644 --- a/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml +++ b/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml @@ -1,4 +1,4 @@ -# permissions for end users to edit replicatedstatemachines. +# permissions for end users to edit instancesets. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml b/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml index 34fd0faed78..416eeba2a2f 100644 --- a/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml +++ b/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml @@ -1,4 +1,4 @@ -# permissions for end users to view replicatedstatemachines. +# permissions for end users to view instancesets. 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: From a96f049a80f9572cff6657fc36adfed54a5a0c11 Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 13:53:33 +0800 Subject: [PATCH 03/14] ut done --- .../v1alpha1/nodeawarescaler_types.go | 24 +++- cmd/manager/main.go | 5 +- ...mental.kubeblocks.io_nodeawarescalers.yaml | 21 ++- .../nodeawarescaler_controller.go | 29 ++-- .../reconciler_scale_target_cluster.go | 84 +++++++++++ .../reconciler_scale_target_cluster_test.go | 61 ++++++++ .../experimental/reconciler_update_status.go | 132 ++++++++++++++++++ .../reconciler_update_status_test.go | 84 +++++++++++ controllers/experimental/suite_test.go | 55 ++++++++ controllers/experimental/tree_loader.go | 90 ++++++++++++ controllers/experimental/tree_loader_test.go | 113 +++++++++++++++ ...mental.kubeblocks.io_nodeawarescalers.yaml | 21 ++- pkg/controller/builder/builder_cluster.go | 39 ++++++ .../builder/builder_cluster_test.go | 50 +++++++ .../builder/builder_instance_set_test.go | 2 +- .../builder/builder_node_aware_scaler.go | 44 ++++++ .../builder/builder_node_aware_scaler_test.go | 46 ++++++ .../instanceset/in_place_update_util.go | 2 +- pkg/controller/instanceset/instance_util.go | 12 +- pkg/controller/instanceset/utils.go | 2 +- pkg/controller/instanceset/utils_test.go | 4 +- 21 files changed, 893 insertions(+), 27 deletions(-) create mode 100644 controllers/experimental/reconciler_scale_target_cluster.go create mode 100644 controllers/experimental/reconciler_scale_target_cluster_test.go create mode 100644 controllers/experimental/reconciler_update_status.go create mode 100644 controllers/experimental/reconciler_update_status_test.go create mode 100644 controllers/experimental/tree_loader.go create mode 100644 controllers/experimental/tree_loader_test.go create mode 100644 pkg/controller/builder/builder_cluster.go create mode 100644 pkg/controller/builder/builder_cluster_test.go create mode 100644 
pkg/controller/builder/builder_node_aware_scaler.go create mode 100644 pkg/controller/builder/builder_node_aware_scaler_test.go diff --git a/apis/experimental/v1alpha1/nodeawarescaler_types.go b/apis/experimental/v1alpha1/nodeawarescaler_types.go index 045d5c6a25e..64ab77ecb78 100644 --- a/apis/experimental/v1alpha1/nodeawarescaler_types.go +++ b/apis/experimental/v1alpha1/nodeawarescaler_types.go @@ -80,8 +80,28 @@ type ComponentStatus struct { DesiredReplicas int32 `json:"desiredReplicas"` } -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status +type ConditionType string + +const ( + // ScaleReady is added to a nodeawarescaler when all target components are ready. + ScaleReady ConditionType = "ScaleReady" +) + +const ( + // ReasonNotReady is a reason for condition ScaleReady. + ReasonNotReady = "NotReady" + + // ReasonReady is a reason for condition ScaleReady. + ReasonReady = "Ready" +) + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories={kubeblocks,all},shortName=nas +// +kubebuilder:printcolumn:name="TARGET-CLUSTER-NAME",type="string",JSONPath=".spec.targetClusterName",description="target cluster name." +// +kubebuilder:printcolumn:name="REPLICAS",type="string",JSONPath=".status.componentStatuses",format="custom:{{range $index, $element := .}}{{if $index}}, {{end}}{{.Name}}: {{.CurrentReplicas}}/{{.DesiredReplicas}}{{end}}",description="ready replicas/desired replicas." 
+// +kubebuilder:printcolumn:name="LAST-SCALE-TIME",type="date",JSONPath=".status.lastScaleTime" // NodeAwareScaler is the Schema for the nodeawarescalers API type NodeAwareScaler struct { diff --git a/cmd/manager/main.go b/cmd/manager/main.go index a1910572117..0b2bda927c4 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -520,8 +520,9 @@ func main() { if viper.GetBool(experimentalFlagKey.viperName()) { if err = (&experimentalcontrollers.NodeAwareScalerReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("node-aware-scaler-controller"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "NodeAwareScaler") os.Exit(1) diff --git a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml index 525c830679d..fdcb3302af8 100644 --- a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml +++ b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -9,13 +9,32 @@ metadata: spec: group: experimental.kubeblocks.io names: + categories: + - kubeblocks + - all kind: NodeAwareScaler listKind: NodeAwareScalerList plural: nodeawarescalers + shortNames: + - nas singular: nodeawarescaler scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - description: target cluster name. + jsonPath: .spec.targetClusterName + name: TARGET-CLUSTER-NAME + type: string + - description: ready replicas/desired replicas. 
+ format: 'custom:{{range $index, $element := .}}{{if $index}}, {{end}}{{.Name}}: + {{.CurrentReplicas}}/{{.DesiredReplicas}}{{end}}' + jsonPath: .status.componentStatuses + name: REPLICAS + type: string + - jsonPath: .status.lastScaleTime + name: LAST-SCALE-TIME + type: date + name: v1alpha1 schema: openAPIV3Schema: description: NodeAwareScaler is the Schema for the nodeawarescalers API diff --git a/controllers/experimental/nodeawarescaler_controller.go b/controllers/experimental/nodeawarescaler_controller.go index 2ffbbf8a0b4..bd809eab759 100644 --- a/controllers/experimental/nodeawarescaler_controller.go +++ b/controllers/experimental/nodeawarescaler_controller.go @@ -21,48 +21,57 @@ package experimental import ( "context" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" ) // NodeAwareScalerReconciler reconciles a NodeAwareScaler object type NodeAwareScalerReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + Recorder record.EventRecorder } //+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodeawarescalers,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodeawarescalers/status,verbs=get;update;patch //+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodeawarescalers/finalizers,verbs=update +// +kubebuilder:rbac:groups=apps.kubeblocks.io,resources=clusters,verbs=get;list;watch;update;patch + +// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets,verbs=get;list;watch +// 
+kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets/status,verbs=get + //+kubebuilder:rbac:groups="",resources=nodes,verbs=list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the NodeAwareScaler object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. // // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile func (r *NodeAwareScalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) + logger := log.FromContext(ctx).WithValues("NodeAwareScaler", req.NamespacedName) - // TODO(user): your logic here + err := kubebuilderx.NewController(ctx, r.Client, req, r.Recorder, logger). + Prepare(objectTree()). + Do(scaleTargetCluster()). + Do(updateStatus()). + Commit() - return ctrl.Result{}, nil + return ctrl.Result{}, err } // SetupWithManager sets up the controller with the Manager. func (r *NodeAwareScalerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&experimentalv1alpha1.NodeAwareScaler{}). + For(&experimental.NodeAwareScaler{}). Watches(&corev1.Node{}, &nodeScalingHandler{r.Client}). 
Complete(r) } diff --git a/controllers/experimental/reconciler_scale_target_cluster.go b/controllers/experimental/reconciler_scale_target_cluster.go new file mode 100644 index 00000000000..9a9772dbdb7 --- /dev/null +++ b/controllers/experimental/reconciler_scale_target_cluster.go @@ -0,0 +1,84 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package experimental + +import ( + "golang.org/x/exp/slices" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "time" + + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/controller/builder" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" + "github.com/apecloud/kubeblocks/pkg/controller/model" +) + +type scaleTargetClusterReconciler struct{} + +func (r *scaleTargetClusterReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult { + if tree.GetRoot() == nil || model.IsObjectDeleting(tree.GetRoot()) { + return kubebuilderx.ResultUnsatisfied + } + return kubebuilderx.ResultSatisfied +} + +func (r *scaleTargetClusterReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) { + scaler, _ := tree.GetRoot().(*experimental.NodeAwareScaler) + clusterKey := builder.NewClusterBuilder(scaler.Namespace, scaler.Spec.TargetClusterName).GetObject() + object, err := tree.Get(clusterKey) + if err != nil { + return nil, err + } + cluster, _ := object.(*appsv1alpha1.Cluster) + nodes := tree.List(&corev1.Node{}) + // TODO(free6om): filter nodes that satisfy pod template spec of each component (by nodeSelector, nodeAffinity&nodeAntiAffinity, tolerations) + desiredReplicas := int32(len(nodes)) + scaled := false + for i := range cluster.Spec.ComponentSpecs { + spec := &cluster.Spec.ComponentSpecs[i] + if slices.IndexFunc(scaler.Spec.TargetComponentNames, func(name string) bool { + return name == spec.Name + }) < 0 { + continue + } + if spec.Replicas != desiredReplicas { + spec.Replicas = desiredReplicas + scaled = true + } + } + if !scaled { + return tree, nil + } + + scaler.Status.LastScaleTime = metav1.Time{Time: time.Now()} + if err = tree.Update(cluster); err != nil { + return nil, err + } + + return tree, nil +} + +func scaleTargetCluster() 
kubebuilderx.Reconciler { + return &scaleTargetClusterReconciler{} +} + +var _ kubebuilderx.Reconciler = &scaleTargetClusterReconciler{} diff --git a/controllers/experimental/reconciler_scale_target_cluster_test.go b/controllers/experimental/reconciler_scale_target_cluster_test.go new file mode 100644 index 00000000000..b0d59fb6245 --- /dev/null +++ b/controllers/experimental/reconciler_scale_target_cluster_test.go @@ -0,0 +1,61 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package experimental + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/controller/builder" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" +) + +var _ = Describe("scale target cluster reconciler test", func() { + BeforeEach(func() { + tree = mockTestTree() + }) + + Context("PreCondition & Reconcile", func() { + It("should work well", func() { + By("PreCondition") + reconciler := scaleTargetCluster() + Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied)) + + By("Reconcile") + beforeReconcile := metav1.Now() + newTree, err := reconciler.Reconcile(tree) + Expect(err).Should(BeNil()) + newNAS, ok := newTree.GetRoot().(*experimentalv1alpha1.NodeAwareScaler) + Expect(ok).Should(BeTrue()) + Expect(newNAS.Status.LastScaleTime.Compare(beforeReconcile.Time)).Should(BeNumerically(">=", 0)) + object, err := newTree.Get(builder.NewClusterBuilder(newNAS.Namespace, newNAS.Spec.TargetClusterName).GetObject()) + Expect(err).Should(BeNil()) + newCluster, ok := object.(*appsv1alpha1.Cluster) + Expect(ok).Should(BeTrue()) + Expect(newCluster.Spec.ComponentSpecs).Should(HaveLen(2)) + Expect(newCluster.Spec.ComponentSpecs[0].Replicas).Should(BeEquivalentTo(2)) + Expect(newCluster.Spec.ComponentSpecs[1].Replicas).Should(BeEquivalentTo(2)) + }) + }) +}) diff --git a/controllers/experimental/reconciler_update_status.go b/controllers/experimental/reconciler_update_status.go new file mode 100644 index 00000000000..ce19735a966 --- /dev/null +++ b/controllers/experimental/reconciler_update_status.go @@ -0,0 +1,132 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the 
Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package experimental + +import ( + "fmt" + "strings" + + "golang.org/x/exp/slices" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/instanceset" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" + "github.com/apecloud/kubeblocks/pkg/controller/model" +) + +type updateStatusReconciler struct{} + +func (r *updateStatusReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuilderx.CheckResult { + if tree.GetRoot() == nil || model.IsObjectDeleting(tree.GetRoot()) { + return kubebuilderx.ResultUnsatisfied + } + return kubebuilderx.ResultSatisfied +} + +func (r *updateStatusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) { + scaler, _ := tree.GetRoot().(*experimental.NodeAwareScaler) + itsList := tree.List(&workloads.InstanceSet{}) + nodes := tree.List(&corev1.Node{}) + // TODO(free6om): filter nodes that satisfy pod template spec of each component (by nodeSelector, nodeAffinity&nodeAntiAffinity, tolerations) + desiredReplicas := int32(len(nodes)) + var statusList []experimental.ComponentStatus + for _, name := range scaler.Spec.TargetComponentNames { + index := slices.IndexFunc(itsList, func(object 
client.Object) bool { + fullName := constant.GenerateClusterComponentName(scaler.Spec.TargetClusterName, name) + return fullName == object.GetName() + }) + if index < 0 { + continue + } + its, _ := itsList[index].(*workloads.InstanceSet) + status := experimental.ComponentStatus{ + Name: name, + CurrentReplicas: its.Status.CurrentReplicas, + ReadyReplicas: its.Status.ReadyReplicas, + AvailableReplicas: its.Status.AvailableReplicas, + DesiredReplicas: desiredReplicas, + } + statusList = append(statusList, status) + } + instanceset.MergeList(&statusList, &scaler.Status.ComponentStatuses, + func(item experimental.ComponentStatus) func(experimental.ComponentStatus) bool { + return func(status experimental.ComponentStatus) bool { + return item.Name == status.Name + } + }) + + condition := buildScaleReadyCondition(scaler) + meta.SetStatusCondition(&scaler.Status.Conditions, *condition) + + return tree, nil +} + +func buildScaleReadyCondition(scaler *experimental.NodeAwareScaler) *metav1.Condition { + var ( + ready = true + notReadyNames []string + ) + for _, name := range scaler.Spec.TargetComponentNames { + index := slices.IndexFunc(scaler.Status.ComponentStatuses, func(status experimental.ComponentStatus) bool { + return status.Name == name + }) + if index < 0 { + ready = false + notReadyNames = append(notReadyNames, name) + continue + } + status := scaler.Status.ComponentStatuses[index] + if status.CurrentReplicas != status.DesiredReplicas || + status.ReadyReplicas != status.DesiredReplicas || + status.AvailableReplicas != status.DesiredReplicas { + ready = false + notReadyNames = append(notReadyNames, name) + } + } + + if !ready { + return &metav1.Condition{ + Type: string(experimental.ScaleReady), + Status: metav1.ConditionFalse, + ObservedGeneration: scaler.Generation, + Reason: experimental.ReasonNotReady, + Message: fmt.Sprintf("not ready components: %s", strings.Join(notReadyNames, ",")), + } + } + return &metav1.Condition{ + Type: 
string(experimental.ScaleReady), + Status: metav1.ConditionTrue, + ObservedGeneration: scaler.Generation, + Reason: experimental.ReasonReady, + Message: "scale ready", + } +} + +func updateStatus() kubebuilderx.Reconciler { + return &updateStatusReconciler{} +} + +var _ kubebuilderx.Reconciler = &updateStatusReconciler{} diff --git a/controllers/experimental/reconciler_update_status_test.go b/controllers/experimental/reconciler_update_status_test.go new file mode 100644 index 00000000000..a9887525abe --- /dev/null +++ b/controllers/experimental/reconciler_update_status_test.go @@ -0,0 +1,84 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package experimental + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" +) + +var _ = Describe("update status reconciler test", func() { + BeforeEach(func() { + tree = mockTestTree() + }) + + Context("PreCondition & Reconcile", func() { + It("should work well", func() { + var reconciler kubebuilderx.Reconciler + + By("scale target cluster") + reconciler = scaleTargetCluster() + Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied)) + newTree, err := reconciler.Reconcile(tree) + Expect(err).Should(BeNil()) + + By("mock the workload to scale ready") + nodes := tree.List(&corev1.Node{}) + desiredReplicas := int32(len(nodes)) + itsList := tree.List(&workloads.InstanceSet{}) + for _, object := range itsList { + its, ok := object.(*workloads.InstanceSet) + Expect(ok).Should(BeTrue()) + its.Status.CurrentReplicas = desiredReplicas + its.Status.ReadyReplicas = desiredReplicas + its.Status.AvailableReplicas = desiredReplicas + } + + By("update status") + reconciler = updateStatus() + Expect(reconciler.PreCondition(newTree)).Should(Equal(kubebuilderx.ResultSatisfied)) + newTree, err = reconciler.Reconcile(tree) + Expect(err).Should(BeNil()) + newNAS, ok := newTree.GetRoot().(*experimentalv1alpha1.NodeAwareScaler) + Expect(ok).Should(BeTrue()) + Expect(newNAS.Status.ComponentStatuses).Should(HaveLen(2)) + Expect(newNAS.Status.ComponentStatuses[0].CurrentReplicas).Should(Equal(desiredReplicas)) + Expect(newNAS.Status.ComponentStatuses[0].ReadyReplicas).Should(Equal(desiredReplicas)) + Expect(newNAS.Status.ComponentStatuses[0].AvailableReplicas).Should(Equal(desiredReplicas)) + Expect(newNAS.Status.ComponentStatuses[0].DesiredReplicas).Should(Equal(desiredReplicas)) + 
Expect(newNAS.Status.ComponentStatuses[1].CurrentReplicas).Should(Equal(desiredReplicas)) + Expect(newNAS.Status.ComponentStatuses[1].ReadyReplicas).Should(Equal(desiredReplicas)) + Expect(newNAS.Status.ComponentStatuses[1].AvailableReplicas).Should(Equal(desiredReplicas)) + Expect(newNAS.Status.ComponentStatuses[1].DesiredReplicas).Should(Equal(desiredReplicas)) + Expect(newNAS.Status.Conditions).Should(HaveLen(1)) + Expect(newNAS.Status.Conditions[0].Type).Should(BeEquivalentTo(experimentalv1alpha1.ScaleReady)) + Expect(newNAS.Status.Conditions[0].Status).Should(Equal(metav1.ConditionTrue)) + Expect(newNAS.Status.Conditions[0].Reason).Should(Equal(experimentalv1alpha1.ReasonReady)) + Expect(newNAS.Status.Conditions[0].Message).Should(Equal("scale ready")) + }) + }) +}) diff --git a/controllers/experimental/suite_test.go b/controllers/experimental/suite_test.go index 2a704ce1fac..2744c715c28 100644 --- a/controllers/experimental/suite_test.go +++ b/controllers/experimental/suite_test.go @@ -20,6 +20,12 @@ along with this program. If not, see . package experimental import ( + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/builder" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "path/filepath" "testing" @@ -40,10 +46,59 @@ import ( // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. +const ( + namespace = "foo" + name = "bar" +) + var cfg *rest.Config var k8sClient client.Client var testEnv *envtest.Environment +var ( + tree *kubebuilderx.ObjectTree + nas *experimentalv1alpha1.NodeAwareScaler + clusterName = "foo" + componentNames = []string{"bar-0", "bar-1"} +) + +func mockTestTree() *kubebuilderx.ObjectTree { + nas = builder.NewNodeAwareScalerBuilder(namespace, name). 
+ SetTargetClusterName(clusterName). + SetTargetComponentNames(componentNames). + GetObject() + + specs := []appsv1alpha1.ClusterComponentSpec{ + { + Name: "foo-0", + }, + { + Name: "foo-1", + }, + } + cluster := builder.NewClusterBuilder(namespace, clusterName).SetComponentSpecs(specs).GetObject() + its0 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[0])).GetObject() + its1 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[1])).GetObject() + node0 := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "node-0", + }, + } + node1 := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "node-1", + }, + } + + tree = kubebuilderx.NewObjectTree() + tree.SetRoot(nas) + Expect(tree.Add(cluster, its0, its1, node0, node1)) + + return tree +} + func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) diff --git a/controllers/experimental/tree_loader.go b/controllers/experimental/tree_loader.go new file mode 100644 index 00000000000..543e9a617b6 --- /dev/null +++ b/controllers/experimental/tree_loader.go @@ -0,0 +1,90 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package experimental + +import ( + "context" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" +) + +type treeLoader struct{} + +func (t *treeLoader) Load(ctx context.Context, reader client.Reader, req ctrl.Request, recorder record.EventRecorder, logger logr.Logger) (*kubebuilderx.ObjectTree, error) { + tree, err := kubebuilderx.ReadObjectTree[*experimental.NodeAwareScaler](ctx, reader, req, nil) + if err != nil { + return nil, err + } + root := tree.GetRoot() + if root == nil { + return tree, nil + } + scaler, _ := root.(*experimental.NodeAwareScaler) + key := types.NamespacedName{Namespace: scaler.Namespace, Name: scaler.Spec.TargetClusterName} + cluster := &appsv1alpha1.Cluster{} + if err = reader.Get(ctx, key, cluster); err != nil { + return nil, err + } + if err = tree.Add(cluster); err != nil { + return nil, err + } + for _, compName := range scaler.Spec.TargetComponentNames { + name := constant.GenerateClusterComponentName(scaler.Spec.TargetClusterName, compName) + key = types.NamespacedName{Namespace: scaler.Namespace, Name: name} + its := &workloads.InstanceSet{} + if err = reader.Get(ctx, key, its); err != nil { + return nil, err + } + if err = tree.Add(its); err != nil { + return nil, err + } + } + nodeList := &corev1.NodeList{} + if err = reader.List(ctx, nodeList); err != nil { + return nil, err + } + for i := range nodeList.Items { + if err = tree.Add(&nodeList.Items[i]); err != nil { + return nil, err + } + } + + tree.EventRecorder = recorder + tree.Logger = logger 
+ + return tree, nil +} + +func objectTree() kubebuilderx.TreeLoader { + return &treeLoader{} +} + +var _ kubebuilderx.TreeLoader = &treeLoader{} diff --git a/controllers/experimental/tree_loader_test.go b/controllers/experimental/tree_loader_test.go new file mode 100644 index 00000000000..c07e291d4f0 --- /dev/null +++ b/controllers/experimental/tree_loader_test.go @@ -0,0 +1,113 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package experimental + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/golang/mock/gomock" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/builder" + testutil "github.com/apecloud/kubeblocks/pkg/testutil/k8s" +) + +var _ = Describe("tree loader test", func() { + Context("Read", func() { + It("should work well", func() { + ctx := context.Background() + logger := logf.FromContext(ctx).WithValues("tree-loader-test", "foo") + controller, k8sMock := testutil.SetupK8sMock() + defer controller.Finish() + + clusterName := "foo" + componentNames := []string{"bar-0", "bar-1"} + root := builder.NewNodeAwareScalerBuilder(namespace, name).SetTargetClusterName(clusterName).SetTargetComponentNames(componentNames).GetObject() + cluster := builder.NewClusterBuilder(namespace, clusterName).GetObject() + its0 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[0])).GetObject() + its1 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[1])).GetObject() + node0 := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "node-0", + }, + } + node1 := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "node-1", + }, + } + + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &experimental.NodeAwareScaler{}, gomock.Any()). 
+ DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *experimental.NodeAwareScaler, _ ...client.GetOption) error { + *obj = *root + return nil + }).Times(1) + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &appsv1alpha1.Cluster{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *appsv1alpha1.Cluster, _ ...client.GetOption) error { + *obj = *cluster + return nil + }).Times(1) + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &workloads.InstanceSet{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *workloads.InstanceSet, _ ...client.GetOption) error { + if objKey.Name == its0.Name { + *obj = *its0 + } else { + *obj = *its1 + } + return nil + }).Times(2) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.NodeList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.NodeList, _ ...client.ListOption) error { + list.Items = []corev1.Node{*node0, *node1} + return nil + }).Times(1) + req := ctrl.Request{NamespacedName: client.ObjectKeyFromObject(root)} + loader := objectTree() + tree, err := loader.Load(ctx, k8sMock, req, nil, logger) + Expect(err).Should(BeNil()) + Expect(tree.GetRoot()).ShouldNot(BeNil()) + Expect(tree.GetRoot()).Should(Equal(root)) + Expect(tree.GetSecondaryObjects()).Should(HaveLen(5)) + objectList := []client.Object{cluster, its0, its1, node0, node1} + for _, object := range objectList { + obj, err := tree.Get(object) + Expect(err).Should(BeNil()) + Expect(obj).Should(Equal(object)) + } + }) + }) +}) diff --git a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml index 525c830679d..fdcb3302af8 100644 --- a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml +++ b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -9,13 +9,32 @@ metadata: spec: group: experimental.kubeblocks.io names: + categories: + - kubeblocks + - all kind: 
NodeAwareScaler listKind: NodeAwareScalerList plural: nodeawarescalers + shortNames: + - nas singular: nodeawarescaler scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - description: target cluster name. + jsonPath: .spec.targetClusterName + name: TARGET-CLUSTER-NAME + type: string + - description: ready replicas/desired replicas. + format: 'custom:{{range $index, $element := .}}{{if $index}}, {{end}}{{.Name}}: + {{.CurrentReplicas}}/{{.DesiredReplicas}}{{end}}' + jsonPath: .status.componentStatuses + name: REPLICAS + type: string + - jsonPath: .status.lastScaleTime + name: LAST-SCALE-TIME + type: date + name: v1alpha1 schema: openAPIV3Schema: description: NodeAwareScaler is the Schema for the nodeawarescalers API diff --git a/pkg/controller/builder/builder_cluster.go b/pkg/controller/builder/builder_cluster.go new file mode 100644 index 00000000000..c73597dc80b --- /dev/null +++ b/pkg/controller/builder/builder_cluster.go @@ -0,0 +1,39 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" +) + +type ClusterBuilder struct { + BaseBuilder[appsv1alpha1.Cluster, *appsv1alpha1.Cluster, ClusterBuilder] +} + +func NewClusterBuilder(namespace, name string) *ClusterBuilder { + builder := &ClusterBuilder{} + builder.init(namespace, name, &appsv1alpha1.Cluster{}, builder) + return builder +} + +func (builder *ClusterBuilder) SetComponentSpecs(specs []appsv1alpha1.ClusterComponentSpec) *ClusterBuilder { + builder.get().Spec.ComponentSpecs = specs + return builder +} diff --git a/pkg/controller/builder/builder_cluster_test.go b/pkg/controller/builder/builder_cluster_test.go new file mode 100644 index 00000000000..26f86b944b0 --- /dev/null +++ b/pkg/controller/builder/builder_cluster_test.go @@ -0,0 +1,50 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("cluster builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + ) + specs := []appsv1alpha1.ClusterComponentSpec{ + { + Name: "foo-0", + }, + { + Name: "foo-1", + }, + } + cluster := NewClusterBuilder(ns, name). + SetComponentSpecs(specs). 
+ GetObject() + + Expect(cluster.Name).Should(Equal(name)) + Expect(cluster.Namespace).Should(Equal(ns)) + Expect(cluster.Spec.ComponentSpecs).Should(Equal(specs)) + }) +}) diff --git a/pkg/controller/builder/builder_instance_set_test.go b/pkg/controller/builder/builder_instance_set_test.go index a22052a1648..2b26881e61e 100644 --- a/pkg/controller/builder/builder_instance_set_test.go +++ b/pkg/controller/builder/builder_instance_set_test.go @@ -32,7 +32,7 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) -var _ = Describe("replicated_state_machine builder", func() { +var _ = Describe("instance_set builder", func() { It("should work well", func() { const ( name = "foo" diff --git a/pkg/controller/builder/builder_node_aware_scaler.go b/pkg/controller/builder/builder_node_aware_scaler.go new file mode 100644 index 00000000000..d20e3a032d9 --- /dev/null +++ b/pkg/controller/builder/builder_node_aware_scaler.go @@ -0,0 +1,44 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package builder + +import ( + experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" +) + +type NodeAwareScalerBuilder struct { + BaseBuilder[experimental.NodeAwareScaler, *experimental.NodeAwareScaler, NodeAwareScalerBuilder] +} + +func NewNodeAwareScalerBuilder(namespace, name string) *NodeAwareScalerBuilder { + builder := &NodeAwareScalerBuilder{} + builder.init(namespace, name, &experimental.NodeAwareScaler{}, builder) + return builder +} + +func (builder *NodeAwareScalerBuilder) SetTargetClusterName(clusterName string) *NodeAwareScalerBuilder { + builder.get().Spec.TargetClusterName = clusterName + return builder +} + +func (builder *NodeAwareScalerBuilder) SetTargetComponentNames(componentNames []string) *NodeAwareScalerBuilder { + builder.get().Spec.TargetComponentNames = componentNames + return builder +} diff --git a/pkg/controller/builder/builder_node_aware_scaler_test.go b/pkg/controller/builder/builder_node_aware_scaler_test.go new file mode 100644 index 00000000000..3e49320dc72 --- /dev/null +++ b/pkg/controller/builder/builder_node_aware_scaler_test.go @@ -0,0 +1,46 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("node_aware_scaler builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + ) + clusterName := "target-cluster-name" + componentNames := []string{"comp-1", "comp-2"} + + nas := NewNodeAwareScalerBuilder(ns, name). + SetTargetClusterName(clusterName). + SetTargetComponentNames(componentNames). + GetObject() + + Expect(nas.Name).Should(Equal(name)) + Expect(nas.Namespace).Should(Equal(ns)) + Expect(nas.Spec.TargetClusterName).Should(Equal(clusterName)) + Expect(nas.Spec.TargetComponentNames).Should(Equal(componentNames)) + }) +}) diff --git a/pkg/controller/instanceset/in_place_update_util.go b/pkg/controller/instanceset/in_place_update_util.go index 0469f4fee3c..1476cda01df 100644 --- a/pkg/controller/instanceset/in_place_update_util.go +++ b/pkg/controller/instanceset/in_place_update_util.go @@ -131,7 +131,7 @@ func mergeInPlaceFields(src, dst *corev1.Pod) { dst.Spec.ActiveDeadlineSeconds = src.Spec.ActiveDeadlineSeconds // according to the Pod API spec, tolerations can only be appended. // means old tolerations must be in new toleration list. 
- mergeList(&src.Spec.Tolerations, &dst.Spec.Tolerations, func(item corev1.Toleration) func(corev1.Toleration) bool { + MergeList(&src.Spec.Tolerations, &dst.Spec.Tolerations, func(item corev1.Toleration) func(corev1.Toleration) bool { return func(t corev1.Toleration) bool { return reflect.DeepEqual(item, t) } diff --git a/pkg/controller/instanceset/instance_util.go b/pkg/controller/instanceset/instance_util.go index 92f7ecb108b..4727498c47a 100644 --- a/pkg/controller/instanceset/instance_util.go +++ b/pkg/controller/instanceset/instance_util.go @@ -322,7 +322,7 @@ func buildInstanceByTemplate(name string, template *instanceTemplateExt, parent }).GetObject() volumeList = append(volumeList, *volume) } - mergeList(&volumeList, &pod.Spec.Volumes, func(item corev1.Volume) func(corev1.Volume) bool { + MergeList(&volumeList, &pod.Spec.Volumes, func(item corev1.Volume) func(corev1.Volume) bool { return func(v corev1.Volume) bool { return v.Name == item.Name } @@ -602,7 +602,7 @@ func buildInstanceTemplateExt(template workloads.InstanceTemplate, templateExt * mergeCPUNMemory(&src.Requests, &dst.Requests) } if template.Env != nil { - mergeList(&template.Env, &templateExt.Spec.Containers[0].Env, + MergeList(&template.Env, &templateExt.Spec.Containers[0].Env, func(item corev1.EnvVar) func(corev1.EnvVar) bool { return func(env corev1.EnvVar) bool { return env.Name == item.Name @@ -610,25 +610,25 @@ func buildInstanceTemplateExt(template workloads.InstanceTemplate, templateExt * }) } } - mergeList(&template.Tolerations, &templateExt.Spec.Tolerations, + MergeList(&template.Tolerations, &templateExt.Spec.Tolerations, func(item corev1.Toleration) func(corev1.Toleration) bool { return func(t corev1.Toleration) bool { return reflect.DeepEqual(item, t) } }) - mergeList(&template.Volumes, &templateExt.Spec.Volumes, + MergeList(&template.Volumes, &templateExt.Spec.Volumes, func(item corev1.Volume) func(corev1.Volume) bool { return func(v corev1.Volume) bool { return v.Name == 
item.Name } }) - mergeList(&template.VolumeMounts, &templateExt.Spec.Containers[0].VolumeMounts, + MergeList(&template.VolumeMounts, &templateExt.Spec.Containers[0].VolumeMounts, func(item corev1.VolumeMount) func(corev1.VolumeMount) bool { return func(vm corev1.VolumeMount) bool { return vm.Name == item.Name } }) - mergeList(&template.VolumeClaimTemplates, &templateExt.VolumeClaimTemplates, + MergeList(&template.VolumeClaimTemplates, &templateExt.VolumeClaimTemplates, func(item corev1.PersistentVolumeClaim) func(corev1.PersistentVolumeClaim) bool { return func(claim corev1.PersistentVolumeClaim) bool { return claim.Name == item.Name diff --git a/pkg/controller/instanceset/utils.go b/pkg/controller/instanceset/utils.go index cef83cf416f..3697ac568a0 100644 --- a/pkg/controller/instanceset/utils.go +++ b/pkg/controller/instanceset/utils.go @@ -197,7 +197,7 @@ func mergeMap[K comparable, V any](src, dst *map[K]V) { } } -func mergeList[E any](src, dst *[]E, f func(E) func(E) bool) { +func MergeList[E any](src, dst *[]E, f func(E) func(E) bool) { if len(*src) == 0 { return } diff --git a/pkg/controller/instanceset/utils_test.go b/pkg/controller/instanceset/utils_test.go index 7d0c237edf1..61ca3dbe69d 100644 --- a/pkg/controller/instanceset/utils_test.go +++ b/pkg/controller/instanceset/utils_test.go @@ -40,7 +40,7 @@ var _ = Describe("utils test", func() { priorityMap = ComposeRolePriorityMap(its.Spec.Roles) }) - Context("mergeList", func() { + Context("MergeList", func() { It("should work well", func() { src := []corev1.Volume{ { @@ -78,7 +78,7 @@ var _ = Describe("utils test", func() { }, }, } - mergeList(&src, &dst, func(v corev1.Volume) func(corev1.Volume) bool { + MergeList(&src, &dst, func(v corev1.Volume) func(corev1.Volume) bool { return func(volume corev1.Volume) bool { return v.Name == volume.Name } From 69a2267f115d4f6d25d48c362f7fac6405d39635 Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 14:15:59 +0800 Subject: [PATCH 04/14] fix ut --- 
.../v1alpha1/nodeawarescaler_types.go         |  4 +-
 ...mental.kubeblocks.io_nodeawarescalers.yaml | 16 +++++---
 .../reconciler_scale_target_cluster_test.go   |  7 +++-
 controllers/experimental/suite_test.go        | 39 ++++---------------
 ...mental.kubeblocks.io_nodeawarescalers.yaml | 16 +++++---
 5 files changed, 37 insertions(+), 45 deletions(-)

diff --git a/apis/experimental/v1alpha1/nodeawarescaler_types.go b/apis/experimental/v1alpha1/nodeawarescaler_types.go
index 64ab77ecb78..b9f554eb8e7 100644
--- a/apis/experimental/v1alpha1/nodeawarescaler_types.go
+++ b/apis/experimental/v1alpha1/nodeawarescaler_types.go
@@ -100,7 +100,9 @@ const (
 // +kubebuilder:subresource:status
 // +kubebuilder:resource:categories={kubeblocks,all},shortName=nas
 // +kubebuilder:printcolumn:name="TARGET-CLUSTER-NAME",type="string",JSONPath=".spec.targetClusterName",description="target cluster name."
-// +kubebuilder:printcolumn:name="REPLICAS",type="string",JSONPath=".status.componentStatuses",format="custom:{{range $index, $element := .}}{{if $index}}, {{end}}{{.Name}}: {{.CurrentReplicas}}/{{.DesiredReplicas}}{{end}}",description="ready replicas/desired replicas."
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].status",description="scale ready."
+// +kubebuilder:printcolumn:name="REASON",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].reason",description="reason."
+// +kubebuilder:printcolumn:name="MESSAGE",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].message",description="message."
 
// +kubebuilder:printcolumn:name="LAST-SCALE-TIME",type="date",JSONPath=".status.lastScaleTime" // NodeAwareScaler is the Schema for the nodeawarescalers API diff --git a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml index fdcb3302af8..b05e9de0bfa 100644 --- a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml +++ b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -25,11 +25,17 @@ spec: jsonPath: .spec.targetClusterName name: TARGET-CLUSTER-NAME type: string - - description: ready replicas/desired replicas. - format: 'custom:{{range $index, $element := .}}{{if $index}}, {{end}}{{.Name}}: - {{.CurrentReplicas}}/{{.DesiredReplicas}}{{end}}' - jsonPath: .status.componentStatuses - name: REPLICAS + - description: scale ready. + jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].status + name: READY + type: string + - description: reason. + jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].reason + name: REASON + type: string + - description: message. + jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].message + name: MESSAGE type: string - jsonPath: .status.lastScaleTime name: LAST-SCALE-TIME diff --git a/controllers/experimental/reconciler_scale_target_cluster_test.go b/controllers/experimental/reconciler_scale_target_cluster_test.go index b0d59fb6245..c8f2e8559a1 100644 --- a/controllers/experimental/reconciler_scale_target_cluster_test.go +++ b/controllers/experimental/reconciler_scale_target_cluster_test.go @@ -23,6 +23,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" @@ -53,9 +54,11 @@ var _ = Describe("scale target cluster reconciler test", func() { Expect(err).Should(BeNil()) newCluster, ok := object.(*appsv1alpha1.Cluster) Expect(ok).Should(BeTrue()) + nodes := newTree.List(&corev1.Node{}) + desiredReplicas := int32(len(nodes)) Expect(newCluster.Spec.ComponentSpecs).Should(HaveLen(2)) - Expect(newCluster.Spec.ComponentSpecs[0].Replicas).Should(BeEquivalentTo(2)) - Expect(newCluster.Spec.ComponentSpecs[1].Replicas).Should(BeEquivalentTo(2)) + Expect(newCluster.Spec.ComponentSpecs[0].Replicas).Should(Equal(desiredReplicas)) + Expect(newCluster.Spec.ComponentSpecs[1].Replicas).Should(Equal(desiredReplicas)) }) }) }) diff --git a/controllers/experimental/suite_test.go b/controllers/experimental/suite_test.go index 2744c715c28..e14a68ba545 100644 --- a/controllers/experimental/suite_test.go +++ b/controllers/experimental/suite_test.go @@ -21,21 +21,18 @@ package experimental import ( appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/builder" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" + "github.com/apecloud/kubeblocks/pkg/controller/model" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "path/filepath" "testing" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -51,10 +48,6 @@ const ( name = "bar" ) -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment - var ( tree *kubebuilderx.ObjectTree nas *experimentalv1alpha1.NodeAwareScaler @@ -70,10 +63,10 @@ func mockTestTree() *kubebuilderx.ObjectTree { specs := []appsv1alpha1.ClusterComponentSpec{ { - Name: "foo-0", + Name: componentNames[0], }, { - Name: "foo-1", + Name: componentNames[1], }, } cluster := builder.NewClusterBuilder(namespace, clusterName).SetComponentSpecs(specs).GetObject() @@ -108,31 +101,13 @@ func TestAPIs(t *testing.T) { var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, - ErrorIfCRDPathMissing: true, - } - - var err error - // cfg is defined in this file globally. 
- cfg, err = testEnv.Start() - Expect(err).NotTo(HaveOccurred()) - Expect(cfg).NotTo(BeNil()) - - err = experimentalv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) + model.AddScheme(experimentalv1alpha1.AddToScheme) + model.AddScheme(appsv1alpha1.AddToScheme) + model.AddScheme(workloads.AddToScheme) //+kubebuilder:scaffold:scheme - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) - }) var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).NotTo(HaveOccurred()) }) diff --git a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml index fdcb3302af8..b05e9de0bfa 100644 --- a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml +++ b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -25,11 +25,17 @@ spec: jsonPath: .spec.targetClusterName name: TARGET-CLUSTER-NAME type: string - - description: ready replicas/desired replicas. - format: 'custom:{{range $index, $element := .}}{{if $index}}, {{end}}{{.Name}}: - {{.CurrentReplicas}}/{{.DesiredReplicas}}{{end}}' - jsonPath: .status.componentStatuses - name: REPLICAS + - description: scale ready. + jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].status + name: READY + type: string + - description: reason. + jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].reason + name: REASON + type: string + - description: message. 
+ jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].message + name: MESSAGE type: string - jsonPath: .status.lastScaleTime name: LAST-SCALE-TIME From 0e479ac5c69a22ca2a5bfb1a81d4c2a25400c796 Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 14:19:08 +0800 Subject: [PATCH 05/14] fix lint error --- .../reconciler_scale_target_cluster.go | 3 ++- controllers/experimental/suite_test.go | 18 +++++++++--------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/controllers/experimental/reconciler_scale_target_cluster.go b/controllers/experimental/reconciler_scale_target_cluster.go index 9a9772dbdb7..2e8e4f854d0 100644 --- a/controllers/experimental/reconciler_scale_target_cluster.go +++ b/controllers/experimental/reconciler_scale_target_cluster.go @@ -20,10 +20,11 @@ along with this program. If not, see . package experimental import ( + "time" + "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "time" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" diff --git a/controllers/experimental/suite_test.go b/controllers/experimental/suite_test.go index e14a68ba545..59cbb88787a 100644 --- a/controllers/experimental/suite_test.go +++ b/controllers/experimental/suite_test.go @@ -20,24 +20,24 @@ along with this program. If not, see . package experimental import ( - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/controller/builder" - "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" - "github.com/apecloud/kubeblocks/pkg/controller/model" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "testing" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" //+kubebuilder:scaffold:imports + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/builder" + "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" + "github.com/apecloud/kubeblocks/pkg/controller/model" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to From d24e72828f2537cd6a5427d186cf57cc6a40ca5c Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 15:11:47 +0800 Subject: [PATCH 06/14] fix event handler bug --- .../v1alpha1/nodeawarescaler_types.go | 10 +-- ...mental.kubeblocks.io_nodeawarescalers.yaml | 18 ++--- controllers/experimental/cluster_handler.go | 66 +++++++++++++++++++ .../experimental/node_scaling_handler.go | 4 +- .../nodeawarescaler_controller.go | 2 + ...mental.kubeblocks.io_nodeawarescalers.yaml | 18 ++--- 6 files changed, 94 insertions(+), 24 deletions(-) create mode 100644 controllers/experimental/cluster_handler.go diff --git a/apis/experimental/v1alpha1/nodeawarescaler_types.go b/apis/experimental/v1alpha1/nodeawarescaler_types.go index b9f554eb8e7..4e684d77df5 100644 --- a/apis/experimental/v1alpha1/nodeawarescaler_types.go +++ b/apis/experimental/v1alpha1/nodeawarescaler_types.go @@ -29,13 +29,13 @@ import ( // NodeAwareScalerSpec defines the desired state of NodeAwareScaler type NodeAwareScalerSpec struct { // Specified the target Cluster name this scaler applies to. 
- TargetClusterName string `json:"targetName"` + TargetClusterName string `json:"targetClusterName"` // Specified the target Component names this scaler applies to. // All Components will be applied if not set. // // +optional - TargetComponentNames []string `json:"TargetComponentNames,omitempty"` + TargetComponentNames []string `json:"targetComponentNames,omitempty"` } // NodeAwareScalerStatus defines the observed state of NodeAwareScaler @@ -100,9 +100,9 @@ const ( // +kubebuilder:subresource:status // +kubebuilder:resource:categories={kubeblocks,all},shortName=nas // +kubebuilder:printcolumn:name="TARGET-CLUSTER-NAME",type="string",JSONPath=".spec.targetClusterName",description="target cluster name." -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.componentStatuses[?(@.type==ScaleReady)].status",description="scale ready." -// +kubebuilder:printcolumn:name="REASON",type="string",JSONPath=".status.componentStatuses[?(@.type==ScaleReady)].reason",description="reason." -// +kubebuilder:printcolumn:name="MESSAGE",type="string",JSONPath=".status.componentStatuses[?(@.type==ScaleReady)].message",description="message." +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type==ScaleReady)].status",description="scale ready." +// +kubebuilder:printcolumn:name="REASON",type="string",JSONPath=".status.conditions[?(@.type==ScaleReady)].reason",description="reason." +// +kubebuilder:printcolumn:name="MESSAGE",type="string",JSONPath=".status.conditions[?(@.type==ScaleReady)].message",description="message." 
// +kubebuilder:printcolumn:name="LAST-SCALE-TIME",type="date",JSONPath=".status.lastScaleTime" // NodeAwareScaler is the Schema for the nodeawarescalers API diff --git a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml index b05e9de0bfa..aedbd3e9dab 100644 --- a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml +++ b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -26,15 +26,15 @@ spec: name: TARGET-CLUSTER-NAME type: string - description: scale ready. - jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].status + jsonPath: .status.conditions[?(@.type==ScaleReady)].status name: READY type: string - description: reason. - jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].reason + jsonPath: .status.conditions[?(@.type==ScaleReady)].reason name: REASON type: string - description: message. - jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].message + jsonPath: .status.conditions[?(@.type==ScaleReady)].message name: MESSAGE type: string - jsonPath: .status.lastScaleTime @@ -60,18 +60,18 @@ spec: spec: description: NodeAwareScalerSpec defines the desired state of NodeAwareScaler properties: - TargetComponentNames: + targetClusterName: + description: Specified the target Cluster name this scaler applies + to. + type: string + targetComponentNames: description: Specified the target Component names this scaler applies to. All Components will be applied if not set. items: type: string type: array - targetName: - description: Specified the target Cluster name this scaler applies - to. 
- type: string required: - - targetName + - targetClusterName type: object status: description: NodeAwareScalerStatus defines the observed state of NodeAwareScaler diff --git a/controllers/experimental/cluster_handler.go b/controllers/experimental/cluster_handler.go new file mode 100644 index 00000000000..a97775ca236 --- /dev/null +++ b/controllers/experimental/cluster_handler.go @@ -0,0 +1,66 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package experimental + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + + experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" +) + +type clusterHandler struct { + client.Client +} + +func (h *clusterHandler) Create(ctx context.Context, event event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) { + h.mapAndEnqueue(ctx, limitingInterface, event.Object) +} + +func (h *clusterHandler) Update(ctx context.Context, event event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) { + h.mapAndEnqueue(ctx, limitingInterface, event.ObjectNew) +} + +func (h *clusterHandler) Delete(ctx context.Context, event event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) { +} + +func (h *clusterHandler) Generic(ctx context.Context, event event.GenericEvent, limitingInterface workqueue.RateLimitingInterface) { +} + +func (h *clusterHandler) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object client.Object) { + scalerList := &experimental.NodeAwareScalerList{} + if err := h.Client.List(ctx, scalerList); err == nil { + for _, item := range scalerList.Items { + if item.Spec.TargetClusterName == object.GetName() && + item.Namespace == object.GetNamespace() { + q.Add(ctrl.Request{NamespacedName: types.NamespacedName{Namespace: item.Namespace, Name: item.Name}}) + break + } + } + } +} + +var _ handler.EventHandler = &clusterHandler{} diff --git a/controllers/experimental/node_scaling_handler.go b/controllers/experimental/node_scaling_handler.go index a75b80eba92..575b2e69a82 100644 --- a/controllers/experimental/node_scaling_handler.go +++ b/controllers/experimental/node_scaling_handler.go @@ -22,7 +22,9 @@ package experimental import ( "context" + 
"k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -52,7 +54,7 @@ func (h *nodeScalingHandler) mapAndEnqueue(ctx context.Context, q workqueue.Rate scalerList := &experimental.NodeAwareScalerList{} if err := h.Client.List(ctx, scalerList); err == nil { for _, item := range scalerList.Items { - q.Add(client.ObjectKeyFromObject(&item)) + q.Add(ctrl.Request{NamespacedName: types.NamespacedName{Namespace: item.Namespace, Name: item.Name}}) } } } diff --git a/controllers/experimental/nodeawarescaler_controller.go b/controllers/experimental/nodeawarescaler_controller.go index bd809eab759..57d45c4b368 100644 --- a/controllers/experimental/nodeawarescaler_controller.go +++ b/controllers/experimental/nodeawarescaler_controller.go @@ -21,6 +21,7 @@ package experimental import ( "context" + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -73,5 +74,6 @@ func (r *NodeAwareScalerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&experimental.NodeAwareScaler{}). Watches(&corev1.Node{}, &nodeScalingHandler{r.Client}). + Watches(&appsv1alpha1.Cluster{}, &clusterHandler{r.Client}). Complete(r) } diff --git a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml index b05e9de0bfa..aedbd3e9dab 100644 --- a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml +++ b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -26,15 +26,15 @@ spec: name: TARGET-CLUSTER-NAME type: string - description: scale ready. 
- jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].status + jsonPath: .status.conditions[?(@.type==ScaleReady)].status name: READY type: string - description: reason. - jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].reason + jsonPath: .status.conditions[?(@.type==ScaleReady)].reason name: REASON type: string - description: message. - jsonPath: .status.componentStatuses[?(@.type==ScaleReady)].message + jsonPath: .status.conditions[?(@.type==ScaleReady)].message name: MESSAGE type: string - jsonPath: .status.lastScaleTime @@ -60,18 +60,18 @@ spec: spec: description: NodeAwareScalerSpec defines the desired state of NodeAwareScaler properties: - TargetComponentNames: + targetClusterName: + description: Specified the target Cluster name this scaler applies + to. + type: string + targetComponentNames: description: Specified the target Component names this scaler applies to. All Components will be applied if not set. items: type: string type: array - targetName: - description: Specified the target Cluster name this scaler applies - to. 
- type: string required: - - targetName + - targetClusterName type: object status: description: NodeAwareScalerStatus defines the observed state of NodeAwareScaler From 715c58bac8ba07caba2dc21e1b0fd83cf0ced3e9 Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 15:17:25 +0800 Subject: [PATCH 07/14] fix printColumn --- apis/experimental/v1alpha1/nodeawarescaler_types.go | 6 +++--- .../bases/experimental.kubeblocks.io_nodeawarescalers.yaml | 6 +++--- .../crds/experimental.kubeblocks.io_nodeawarescalers.yaml | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/apis/experimental/v1alpha1/nodeawarescaler_types.go b/apis/experimental/v1alpha1/nodeawarescaler_types.go index 4e684d77df5..d964d674eb3 100644 --- a/apis/experimental/v1alpha1/nodeawarescaler_types.go +++ b/apis/experimental/v1alpha1/nodeawarescaler_types.go @@ -100,9 +100,9 @@ const ( // +kubebuilder:subresource:status // +kubebuilder:resource:categories={kubeblocks,all},shortName=nas // +kubebuilder:printcolumn:name="TARGET-CLUSTER-NAME",type="string",JSONPath=".spec.targetClusterName",description="target cluster name." -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type==ScaleReady)].status",description="scale ready." -// +kubebuilder:printcolumn:name="REASON",type="string",JSONPath=".status.conditions[?(@.type==ScaleReady)].reason",description="reason." -// +kubebuilder:printcolumn:name="MESSAGE",type="string",JSONPath=".status.conditions[?(@.type==ScaleReady)].message",description="message." +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].status",description="scale ready." +// +kubebuilder:printcolumn:name="REASON",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].reason",description="reason." +// +kubebuilder:printcolumn:name="MESSAGE",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].message",description="message." 
// +kubebuilder:printcolumn:name="LAST-SCALE-TIME",type="date",JSONPath=".status.lastScaleTime" // NodeAwareScaler is the Schema for the nodeawarescalers API diff --git a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml index aedbd3e9dab..37253f5b894 100644 --- a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml +++ b/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -26,15 +26,15 @@ spec: name: TARGET-CLUSTER-NAME type: string - description: scale ready. - jsonPath: .status.conditions[?(@.type==ScaleReady)].status + jsonPath: .status.conditions[?(@.type=="ScaleReady")].status name: READY type: string - description: reason. - jsonPath: .status.conditions[?(@.type==ScaleReady)].reason + jsonPath: .status.conditions[?(@.type=="ScaleReady")].reason name: REASON type: string - description: message. - jsonPath: .status.conditions[?(@.type==ScaleReady)].message + jsonPath: .status.conditions[?(@.type=="ScaleReady")].message name: MESSAGE type: string - jsonPath: .status.lastScaleTime diff --git a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml index aedbd3e9dab..37253f5b894 100644 --- a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml +++ b/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml @@ -26,15 +26,15 @@ spec: name: TARGET-CLUSTER-NAME type: string - description: scale ready. - jsonPath: .status.conditions[?(@.type==ScaleReady)].status + jsonPath: .status.conditions[?(@.type=="ScaleReady")].status name: READY type: string - description: reason. - jsonPath: .status.conditions[?(@.type==ScaleReady)].reason + jsonPath: .status.conditions[?(@.type=="ScaleReady")].reason name: REASON type: string - description: message. 
- jsonPath: .status.conditions[?(@.type==ScaleReady)].message + jsonPath: .status.conditions[?(@.type=="ScaleReady")].message name: MESSAGE type: string - jsonPath: .status.lastScaleTime From 843f4b554f46a98e06cd5995cbf0faf968abf9ca Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 15:21:11 +0800 Subject: [PATCH 08/14] fix imports --- cmd/manager/main.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 0b2bda927c4..7d33d1daf7f 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -47,20 +47,19 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" - experimentalcontrollers "github.com/apecloud/kubeblocks/controllers/experimental" - // +kubebuilder:scaffold:imports appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" appsv1beta1 "github.com/apecloud/kubeblocks/apis/apps/v1beta1" dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + experimentalv1alpha1 "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1" storagev1alpha1 "github.com/apecloud/kubeblocks/apis/storage/v1alpha1" "github.com/apecloud/kubeblocks/apis/workloads/legacy" workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" appscontrollers "github.com/apecloud/kubeblocks/controllers/apps" "github.com/apecloud/kubeblocks/controllers/apps/configuration" + experimentalcontrollers "github.com/apecloud/kubeblocks/controllers/experimental" extensionscontrollers "github.com/apecloud/kubeblocks/controllers/extensions" k8scorecontrollers "github.com/apecloud/kubeblocks/controllers/k8score" workloadscontrollers "github.com/apecloud/kubeblocks/controllers/workloads" From af36492c88dd036e0b5a187f14d587557a4e3926 Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 
May 2024 15:24:18 +0800 Subject: [PATCH 09/14] make generate --- .../v1alpha1/zz_generated.deepcopy.go | 38 ++++++++++++++++++- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/apis/experimental/v1alpha1/zz_generated.deepcopy.go b/apis/experimental/v1alpha1/zz_generated.deepcopy.go index 0c3620407f1..b8fc160951c 100644 --- a/apis/experimental/v1alpha1/zz_generated.deepcopy.go +++ b/apis/experimental/v1alpha1/zz_generated.deepcopy.go @@ -25,16 +25,32 @@ along with this program. If not, see . package v1alpha1 import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. +func (in *ComponentStatus) DeepCopy() *ComponentStatus { + if in == nil { + return nil + } + out := new(ComponentStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeAwareScaler) DeepCopyInto(out *NodeAwareScaler) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScaler. @@ -90,6 +106,11 @@ func (in *NodeAwareScalerList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodeAwareScalerSpec) DeepCopyInto(out *NodeAwareScalerSpec) { *out = *in + if in.TargetComponentNames != nil { + in, out := &in.TargetComponentNames, &out.TargetComponentNames + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScalerSpec. @@ -105,6 +126,19 @@ func (in *NodeAwareScalerSpec) DeepCopy() *NodeAwareScalerSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeAwareScalerStatus) DeepCopyInto(out *NodeAwareScalerStatus) { *out = *in + if in.ComponentStatuses != nil { + in, out := &in.ComponentStatuses, &out.ComponentStatuses + *out = make([]ComponentStatus, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.LastScaleTime.DeepCopyInto(&out.LastScaleTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScalerStatus. From cb8ad654f76a94d48ced6965542a3c932b3cefaa Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 15:48:36 +0800 Subject: [PATCH 10/14] document experimental api group --- apis/experimental/doc.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 apis/experimental/doc.go diff --git a/apis/experimental/doc.go b/apis/experimental/doc.go new file mode 100644 index 00000000000..e6e399d1139 --- /dev/null +++ b/apis/experimental/doc.go @@ -0,0 +1,29 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package experimental is a group of APIs that are in an experimental stage. + +We often encounter needs and ideas from community users, customers, or internal discussions. +These ideas or features typically require a lot of preparatory work before they can be officially supported in KubeBlocks. +This includes requirements analysis, solution research, API design and discussions, solution design and discussions, and so on. +This process often takes a considerable amount of time. + +To quickly validate the feasibility of feature functionalities, an experimental API is now added to KubeBlocks. +This API is used for rapidly verifying the feasibility of a specific feature. +Please note that experimental APIs do not guarantee backward compatibility. 
+*/ +package experimental From ff8c57c2c5b4fda48a9798699ee74cb99d0fabc2 Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 15:49:26 +0800 Subject: [PATCH 11/14] fix lint error --- controllers/experimental/nodeawarescaler_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/experimental/nodeawarescaler_controller.go b/controllers/experimental/nodeawarescaler_controller.go index 57d45c4b368..43459aa7584 100644 --- a/controllers/experimental/nodeawarescaler_controller.go +++ b/controllers/experimental/nodeawarescaler_controller.go @@ -21,7 +21,6 @@ package experimental import ( "context" - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -30,6 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" ) From 376d9141a651423e0e27ccb7f5589dcfc4fced8d Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 17:28:25 +0800 Subject: [PATCH 12/14] rename NodeAwareScaler to NodeCountScaler --- PROJECT | 2 +- ...aler_types.go => nodecountscaler_types.go} | 34 ++++++++--------- .../v1alpha1/zz_generated.deepcopy.go | 38 +++++++++---------- cmd/manager/main.go | 6 +-- ...ental.kubeblocks.io_nodecountscalers.yaml} | 24 ++++++------ config/crd/kustomization.yaml | 6 +-- ...ion_in_experimental_nodecountscalers.yaml} | 2 +- ...ook_in_experimental_nodecountscalers.yaml} | 2 +- ...rimental_nodecountscaler_editor_role.yaml} | 10 ++--- ...rimental_nodecountscaler_viewer_role.yaml} | 10 ++--- config/rbac/role.yaml | 6 +-- ...xperimental_v1alpha1_nodecountscaler.yaml} | 8 ++-- controllers/experimental/cluster_handler.go | 2 +- .../experimental/node_scaling_handler.go | 2 +- ...oller.go => 
nodecountscaler_controller.go} | 18 ++++----- .../reconciler_scale_target_cluster.go | 2 +- .../reconciler_scale_target_cluster_test.go | 6 +-- .../experimental/reconciler_update_status.go | 4 +- .../reconciler_update_status_test.go | 30 +++++++-------- controllers/experimental/suite_test.go | 10 ++--- controllers/experimental/tree_loader.go | 4 +- controllers/experimental/tree_loader_test.go | 6 +-- deploy/helm/config/rbac/role.yaml | 6 +-- ...ental.kubeblocks.io_nodecountscalers.yaml} | 24 ++++++------ ...rimental_nodecountscaler_editor_role.yaml} | 8 ++-- ...rimental_nodecountscaler_viewer_role.yaml} | 8 ++-- ...scaler.go => builder_node_count_scaler.go} | 14 +++---- ...t.go => builder_node_count_scaler_test.go} | 12 +++--- 28 files changed, 152 insertions(+), 152 deletions(-) rename apis/experimental/v1alpha1/{nodeawarescaler_types.go => nodecountscaler_types.go} (82%) rename config/crd/bases/{experimental.kubeblocks.io_nodeawarescalers.yaml => experimental.kubeblocks.io_nodecountscalers.yaml} (93%) rename config/crd/patches/{cainjection_in_experimental_nodeawarescalers.yaml => cainjection_in_experimental_nodecountscalers.yaml} (83%) rename config/crd/patches/{webhook_in_experimental_nodeawarescalers.yaml => webhook_in_experimental_nodecountscalers.yaml} (87%) rename config/rbac/{experimental_nodeawarescaler_editor_role.yaml => experimental_nodecountscaler_editor_role.yaml} (71%) rename config/rbac/{experimental_nodeawarescaler_viewer_role.yaml => experimental_nodecountscaler_viewer_role.yaml} (69%) rename config/samples/{experimental_v1alpha1_nodeawarescaler.yaml => experimental_v1alpha1_nodecountscaler.yaml} (60%) rename controllers/experimental/{nodeawarescaler_controller.go => nodecountscaler_controller.go} (84%) rename deploy/helm/crds/{experimental.kubeblocks.io_nodeawarescalers.yaml => experimental.kubeblocks.io_nodecountscalers.yaml} (93%) rename deploy/helm/templates/rbac/{experimental_nodeawarescaler_editor_role.yaml => 
experimental_nodecountscaler_editor_role.yaml} (68%) rename deploy/helm/templates/rbac/{experimental_nodeawarescaler_viewer_role.yaml => experimental_nodecountscaler_viewer_role.yaml} (65%) rename pkg/controller/builder/{builder_node_aware_scaler.go => builder_node_count_scaler.go} (65%) rename pkg/controller/builder/{builder_node_aware_scaler_test.go => builder_node_count_scaler_test.go} (77%) diff --git a/PROJECT b/PROJECT index a5988cade67..01f0bd6bb87 100644 --- a/PROJECT +++ b/PROJECT @@ -252,7 +252,7 @@ resources: controller: true domain: kubeblocks.io group: experimental - kind: NodeAwareScaler + kind: NodeCountScaler path: github.com/apecloud/kubeblocks/apis/experimental/v1alpha1 version: v1alpha1 version: "3" diff --git a/apis/experimental/v1alpha1/nodeawarescaler_types.go b/apis/experimental/v1alpha1/nodecountscaler_types.go similarity index 82% rename from apis/experimental/v1alpha1/nodeawarescaler_types.go rename to apis/experimental/v1alpha1/nodecountscaler_types.go index d964d674eb3..e317752551d 100644 --- a/apis/experimental/v1alpha1/nodeawarescaler_types.go +++ b/apis/experimental/v1alpha1/nodecountscaler_types.go @@ -26,8 +26,8 @@ import ( // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. -// NodeAwareScalerSpec defines the desired state of NodeAwareScaler -type NodeAwareScalerSpec struct { +// NodeCountScalerSpec defines the desired state of NodeCountScaler +type NodeCountScalerSpec struct { // Specified the target Cluster name this scaler applies to. TargetClusterName string `json:"targetClusterName"` @@ -38,14 +38,14 @@ type NodeAwareScalerSpec struct { TargetComponentNames []string `json:"targetComponentNames,omitempty"` } -// NodeAwareScalerStatus defines the observed state of NodeAwareScaler -type NodeAwareScalerStatus struct { - // Records the current status information of all Components specified in the NodeAwareScalerSpec. 
+// NodeCountScalerStatus defines the observed state of NodeCountScaler +type NodeCountScalerStatus struct { + // Records the current status information of all Components specified in the NodeCountScalerSpec. // // +optional ComponentStatuses []ComponentStatus `json:"componentStatuses,omitempty"` - // Represents the latest available observations of a nodeawarescaler's current state. + // Represents the latest available observations of a nodecountscaler's current state. // Known .status.conditions.type are: "ScaleReady". // ScaleReady - All target components are ready. // @@ -56,7 +56,7 @@ type NodeAwareScalerStatus struct { // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - // LastScaleTime is the last time the NodeAwareScaler scaled the number of instances. + // LastScaleTime is the last time the NodeCountScaler scaled the number of instances. // // +optional LastScaleTime metav1.Time `json:"lastScaleTime,omitempty"` @@ -83,7 +83,7 @@ type ComponentStatus struct { type ConditionType string const ( - // ScaleReady is added to a nodeawarescaler when all target components are ready. + // ScaleReady is added to a nodecountscaler when all target components are ready. ScaleReady ConditionType = "ScaleReady" ) @@ -98,31 +98,31 @@ const ( // +genclient // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:categories={kubeblocks,all},shortName=nas +// +kubebuilder:resource:categories={kubeblocks,all},shortName=ncs // +kubebuilder:printcolumn:name="TARGET-CLUSTER-NAME",type="string",JSONPath=".spec.targetClusterName",description="target cluster name." // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].status",description="scale ready." // +kubebuilder:printcolumn:name="REASON",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].reason",description="reason." 
// +kubebuilder:printcolumn:name="MESSAGE",type="string",JSONPath=".status.conditions[?(@.type==\"ScaleReady\")].message",description="message." // +kubebuilder:printcolumn:name="LAST-SCALE-TIME",type="date",JSONPath=".status.lastScaleTime" -// NodeAwareScaler is the Schema for the nodeawarescalers API -type NodeAwareScaler struct { +// NodeCountScaler is the Schema for the nodecountscalers API +type NodeCountScaler struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec NodeAwareScalerSpec `json:"spec,omitempty"` - Status NodeAwareScalerStatus `json:"status,omitempty"` + Spec NodeCountScalerSpec `json:"spec,omitempty"` + Status NodeCountScalerStatus `json:"status,omitempty"` } //+kubebuilder:object:root=true -// NodeAwareScalerList contains a list of NodeAwareScaler -type NodeAwareScalerList struct { +// NodeCountScalerList contains a list of NodeCountScaler +type NodeCountScalerList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []NodeAwareScaler `json:"items"` + Items []NodeCountScaler `json:"items"` } func init() { - SchemeBuilder.Register(&NodeAwareScaler{}, &NodeAwareScalerList{}) + SchemeBuilder.Register(&NodeCountScaler{}, &NodeCountScalerList{}) } diff --git a/apis/experimental/v1alpha1/zz_generated.deepcopy.go b/apis/experimental/v1alpha1/zz_generated.deepcopy.go index b8fc160951c..71393796dc8 100644 --- a/apis/experimental/v1alpha1/zz_generated.deepcopy.go +++ b/apis/experimental/v1alpha1/zz_generated.deepcopy.go @@ -45,7 +45,7 @@ func (in *ComponentStatus) DeepCopy() *ComponentStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeAwareScaler) DeepCopyInto(out *NodeAwareScaler) { +func (in *NodeCountScaler) DeepCopyInto(out *NodeCountScaler) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -53,18 +53,18 @@ func (in *NodeAwareScaler) DeepCopyInto(out *NodeAwareScaler) { in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScaler. -func (in *NodeAwareScaler) DeepCopy() *NodeAwareScaler { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCountScaler. +func (in *NodeCountScaler) DeepCopy() *NodeCountScaler { if in == nil { return nil } - out := new(NodeAwareScaler) + out := new(NodeCountScaler) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeAwareScaler) DeepCopyObject() runtime.Object { +func (in *NodeCountScaler) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -72,31 +72,31 @@ func (in *NodeAwareScaler) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAwareScalerList) DeepCopyInto(out *NodeAwareScalerList) { +func (in *NodeCountScalerList) DeepCopyInto(out *NodeCountScalerList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]NodeAwareScaler, len(*in)) + *out = make([]NodeCountScaler, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScalerList. -func (in *NodeAwareScalerList) DeepCopy() *NodeAwareScalerList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCountScalerList. 
+func (in *NodeCountScalerList) DeepCopy() *NodeCountScalerList { if in == nil { return nil } - out := new(NodeAwareScalerList) + out := new(NodeCountScalerList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeAwareScalerList) DeepCopyObject() runtime.Object { +func (in *NodeCountScalerList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -104,7 +104,7 @@ func (in *NodeAwareScalerList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAwareScalerSpec) DeepCopyInto(out *NodeAwareScalerSpec) { +func (in *NodeCountScalerSpec) DeepCopyInto(out *NodeCountScalerSpec) { *out = *in if in.TargetComponentNames != nil { in, out := &in.TargetComponentNames, &out.TargetComponentNames @@ -113,18 +113,18 @@ func (in *NodeAwareScalerSpec) DeepCopyInto(out *NodeAwareScalerSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScalerSpec. -func (in *NodeAwareScalerSpec) DeepCopy() *NodeAwareScalerSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCountScalerSpec. +func (in *NodeCountScalerSpec) DeepCopy() *NodeCountScalerSpec { if in == nil { return nil } - out := new(NodeAwareScalerSpec) + out := new(NodeCountScalerSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeAwareScalerStatus) DeepCopyInto(out *NodeAwareScalerStatus) { +func (in *NodeCountScalerStatus) DeepCopyInto(out *NodeCountScalerStatus) { *out = *in if in.ComponentStatuses != nil { in, out := &in.ComponentStatuses, &out.ComponentStatuses @@ -141,12 +141,12 @@ func (in *NodeAwareScalerStatus) DeepCopyInto(out *NodeAwareScalerStatus) { in.LastScaleTime.DeepCopyInto(&out.LastScaleTime) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAwareScalerStatus. -func (in *NodeAwareScalerStatus) DeepCopy() *NodeAwareScalerStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCountScalerStatus. +func (in *NodeCountScalerStatus) DeepCopy() *NodeCountScalerStatus { if in == nil { return nil } - out := new(NodeAwareScalerStatus) + out := new(NodeCountScalerStatus) in.DeepCopyInto(out) return out } diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 7d33d1daf7f..4514510f429 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -518,12 +518,12 @@ func main() { } if viper.GetBool(experimentalFlagKey.viperName()) { - if err = (&experimentalcontrollers.NodeAwareScalerReconciler{ + if err = (&experimentalcontrollers.NodeCountScalerReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("node-aware-scaler-controller"), + Recorder: mgr.GetEventRecorderFor("node-count-scaler-controller"), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "NodeAwareScaler") + setupLog.Error(err, "unable to create controller", "controller", "NodeCountScaler") os.Exit(1) } } diff --git a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml b/config/crd/bases/experimental.kubeblocks.io_nodecountscalers.yaml similarity index 93% rename from config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml rename to 
config/crd/bases/experimental.kubeblocks.io_nodecountscalers.yaml index 37253f5b894..73eba8e34ac 100644 --- a/config/crd/bases/experimental.kubeblocks.io_nodeawarescalers.yaml +++ b/config/crd/bases/experimental.kubeblocks.io_nodecountscalers.yaml @@ -5,19 +5,19 @@ metadata: controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks - name: nodeawarescalers.experimental.kubeblocks.io + name: nodecountscalers.experimental.kubeblocks.io spec: group: experimental.kubeblocks.io names: categories: - kubeblocks - all - kind: NodeAwareScaler - listKind: NodeAwareScalerList - plural: nodeawarescalers + kind: NodeCountScaler + listKind: NodeCountScalerList + plural: nodecountscalers shortNames: - - nas - singular: nodeawarescaler + - ncs + singular: nodecountscaler scope: Namespaced versions: - additionalPrinterColumns: @@ -43,7 +43,7 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: NodeAwareScaler is the Schema for the nodeawarescalers API + description: NodeCountScaler is the Schema for the nodecountscalers API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -58,7 +58,7 @@ spec: metadata: type: object spec: - description: NodeAwareScalerSpec defines the desired state of NodeAwareScaler + description: NodeCountScalerSpec defines the desired state of NodeCountScaler properties: targetClusterName: description: Specified the target Cluster name this scaler applies @@ -74,11 +74,11 @@ spec: - targetClusterName type: object status: - description: NodeAwareScalerStatus defines the observed state of NodeAwareScaler + description: NodeCountScalerStatus defines the observed state of NodeCountScaler properties: componentStatuses: description: Records the current status information of all Components - specified in the NodeAwareScalerSpec. + specified in the NodeCountScalerSpec. 
items: properties: availableReplicas: @@ -113,7 +113,7 @@ spec: type: object type: array conditions: - description: 'Represents the latest available observations of a nodeawarescaler''s + description: 'Represents the latest available observations of a nodecountscaler''s current state. Known .status.conditions.type are: "ScaleReady". ScaleReady - All target components are ready.' items: @@ -187,7 +187,7 @@ spec: - type x-kubernetes-list-type: map lastScaleTime: - description: LastScaleTime is the last time the NodeAwareScaler scaled + description: LastScaleTime is the last time the NodeCountScaler scaled the number of instances. format: date-time type: string diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index b10d341efd9..1a5378fdb52 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -24,7 +24,7 @@ resources: - bases/apps.kubeblocks.io_opsdefinitions.yaml - bases/apps.kubeblocks.io_componentversions.yaml - bases/dataprotection.kubeblocks.io_storageproviders.yaml -- bases/experimental.kubeblocks.io_nodeawarescalers.yaml +- bases/experimental.kubeblocks.io_nodecountscalers.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -53,7 +53,7 @@ patchesStrategicMerge: #- patches/webhook_in_components.yaml #- patches/webhook_in_opsdefinitions.yaml #- patches/webhook_in_componentversions.yaml -#- patches/webhook_in_nodeawarescalers.yaml +#- patches/webhook_in_nodecountscalers.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 
@@ -81,7 +81,7 @@ patchesStrategicMerge: #- patches/cainjection_in_components.yaml #- patches/cainjection_in_opsdefinitions.yaml #- patches/cainjection_in_componentversions.yaml -#- patches/cainjection_in_nodeawarescalers.yaml +#- patches/cainjection_in_nodecountscalers.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_experimental_nodeawarescalers.yaml b/config/crd/patches/cainjection_in_experimental_nodecountscalers.yaml similarity index 83% rename from config/crd/patches/cainjection_in_experimental_nodeawarescalers.yaml rename to config/crd/patches/cainjection_in_experimental_nodecountscalers.yaml index 82bccd6f467..05b04101b88 100644 --- a/config/crd/patches/cainjection_in_experimental_nodeawarescalers.yaml +++ b/config/crd/patches/cainjection_in_experimental_nodecountscalers.yaml @@ -4,4 +4,4 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: nodeawarescalers.experimental.kubeblocks.io + name: nodecountscalers.experimental.kubeblocks.io diff --git a/config/crd/patches/webhook_in_experimental_nodeawarescalers.yaml b/config/crd/patches/webhook_in_experimental_nodecountscalers.yaml similarity index 87% rename from config/crd/patches/webhook_in_experimental_nodeawarescalers.yaml rename to config/crd/patches/webhook_in_experimental_nodecountscalers.yaml index 20fb5af220f..4eed0e885a7 100644 --- a/config/crd/patches/webhook_in_experimental_nodeawarescalers.yaml +++ b/config/crd/patches/webhook_in_experimental_nodecountscalers.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: nodeawarescalers.experimental.kubeblocks.io + name: nodecountscalers.experimental.kubeblocks.io spec: conversion: strategy: Webhook diff --git a/config/rbac/experimental_nodeawarescaler_editor_role.yaml 
b/config/rbac/experimental_nodecountscaler_editor_role.yaml similarity index 71% rename from config/rbac/experimental_nodeawarescaler_editor_role.yaml rename to config/rbac/experimental_nodecountscaler_editor_role.yaml index 1153ec84285..13f543ffa30 100644 --- a/config/rbac/experimental_nodeawarescaler_editor_role.yaml +++ b/config/rbac/experimental_nodecountscaler_editor_role.yaml @@ -1,20 +1,20 @@ -# permissions for end users to edit nodeawarescalers. +# permissions for end users to edit nodecountscalers. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: nodeawarescaler-editor-role + app.kubernetes.io/instance: nodecountscaler-editor-role app.kubernetes.io/component: rbac app.kubernetes.io/created-by: kubeblocks app.kubernetes.io/part-of: kubeblocks app.kubernetes.io/managed-by: kustomize - name: nodeawarescaler-editor-role + name: nodecountscaler-editor-role rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers + - nodecountscalers verbs: - create - delete @@ -26,6 +26,6 @@ rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers/status + - nodecountscalers/status verbs: - get diff --git a/config/rbac/experimental_nodeawarescaler_viewer_role.yaml b/config/rbac/experimental_nodecountscaler_viewer_role.yaml similarity index 69% rename from config/rbac/experimental_nodeawarescaler_viewer_role.yaml rename to config/rbac/experimental_nodecountscaler_viewer_role.yaml index a1fba19a052..742e37d3337 100644 --- a/config/rbac/experimental_nodeawarescaler_viewer_role.yaml +++ b/config/rbac/experimental_nodecountscaler_viewer_role.yaml @@ -1,20 +1,20 @@ -# permissions for end users to view nodeawarescalers. +# permissions for end users to view nodecountscalers. 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: nodeawarescaler-viewer-role + app.kubernetes.io/instance: nodecountscaler-viewer-role app.kubernetes.io/component: rbac app.kubernetes.io/created-by: kubeblocks app.kubernetes.io/part-of: kubeblocks app.kubernetes.io/managed-by: kustomize - name: nodeawarescaler-viewer-role + name: nodecountscaler-viewer-role rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers + - nodecountscalers verbs: - get - list @@ -22,6 +22,6 @@ rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers/status + - nodecountscalers/status verbs: - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index e3218c245bd..67c97244b19 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -790,7 +790,7 @@ rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers + - nodecountscalers verbs: - create - delete @@ -802,13 +802,13 @@ rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers/finalizers + - nodecountscalers/finalizers verbs: - update - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers/status + - nodecountscalers/status verbs: - get - patch diff --git a/config/samples/experimental_v1alpha1_nodeawarescaler.yaml b/config/samples/experimental_v1alpha1_nodecountscaler.yaml similarity index 60% rename from config/samples/experimental_v1alpha1_nodeawarescaler.yaml rename to config/samples/experimental_v1alpha1_nodecountscaler.yaml index eab21dd30f7..1e799a39647 100644 --- a/config/samples/experimental_v1alpha1_nodeawarescaler.yaml +++ b/config/samples/experimental_v1alpha1_nodecountscaler.yaml @@ -1,12 +1,12 @@ apiVersion: experimental.kubeblocks.io/v1alpha1 -kind: NodeAwareScaler +kind: NodeCountScaler metadata: labels: - app.kubernetes.io/name: nodeawarescaler - app.kubernetes.io/instance: nodeawarescaler-sample + 
app.kubernetes.io/name: nodecountscaler + app.kubernetes.io/instance: nodecountscaler-sample app.kubernetes.io/part-of: kubeblocks app.kubernetes.io/managed-by: kustomize app.kubernetes.io/created-by: kubeblocks - name: nodeawarescaler-sample + name: nodecountscaler-sample spec: # TODO(user): Add fields here diff --git a/controllers/experimental/cluster_handler.go b/controllers/experimental/cluster_handler.go index a97775ca236..7b3aeae3224 100644 --- a/controllers/experimental/cluster_handler.go +++ b/controllers/experimental/cluster_handler.go @@ -51,7 +51,7 @@ func (h *clusterHandler) Generic(ctx context.Context, event event.GenericEvent, } func (h *clusterHandler) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object client.Object) { - scalerList := &experimental.NodeAwareScalerList{} + scalerList := &experimental.NodeCountScalerList{} if err := h.Client.List(ctx, scalerList); err == nil { for _, item := range scalerList.Items { if item.Spec.TargetClusterName == object.GetName() && diff --git a/controllers/experimental/node_scaling_handler.go b/controllers/experimental/node_scaling_handler.go index 575b2e69a82..9e30d49842a 100644 --- a/controllers/experimental/node_scaling_handler.go +++ b/controllers/experimental/node_scaling_handler.go @@ -51,7 +51,7 @@ func (h *nodeScalingHandler) Generic(ctx context.Context, event event.GenericEve } func (h *nodeScalingHandler) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface) { - scalerList := &experimental.NodeAwareScalerList{} + scalerList := &experimental.NodeCountScalerList{} if err := h.Client.List(ctx, scalerList); err == nil { for _, item := range scalerList.Items { q.Add(ctrl.Request{NamespacedName: types.NamespacedName{Namespace: item.Namespace, Name: item.Name}}) diff --git a/controllers/experimental/nodeawarescaler_controller.go b/controllers/experimental/nodecountscaler_controller.go similarity index 84% rename from controllers/experimental/nodeawarescaler_controller.go 
rename to controllers/experimental/nodecountscaler_controller.go index 43459aa7584..00b8db6bd58 100644 --- a/controllers/experimental/nodeawarescaler_controller.go +++ b/controllers/experimental/nodecountscaler_controller.go @@ -34,16 +34,16 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" ) -// NodeAwareScalerReconciler reconciles a NodeAwareScaler object -type NodeAwareScalerReconciler struct { +// NodeCountScalerReconciler reconciles a NodeCountScaler object +type NodeCountScalerReconciler struct { client.Client Scheme *runtime.Scheme Recorder record.EventRecorder } -//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodeawarescalers,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodeawarescalers/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodeawarescalers/finalizers,verbs=update +//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodecountscalers,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodecountscalers/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=experimental.kubeblocks.io,resources=nodecountscalers/finalizers,verbs=update // +kubebuilder:rbac:groups=apps.kubeblocks.io,resources=clusters,verbs=get;list;watch;update;patch @@ -57,8 +57,8 @@ type NodeAwareScalerReconciler struct { // // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile -func (r *NodeAwareScalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx).WithValues("NodeAwareScaler", req.NamespacedName) +func (r *NodeCountScalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx).WithValues("NodeCountScaler", req.NamespacedName) err := 
kubebuilderx.NewController(ctx, r.Client, req, r.Recorder, logger). Prepare(objectTree()). @@ -70,9 +70,9 @@ func (r *NodeAwareScalerReconciler) Reconcile(ctx context.Context, req ctrl.Requ } // SetupWithManager sets up the controller with the Manager. -func (r *NodeAwareScalerReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *NodeCountScalerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&experimental.NodeAwareScaler{}). + For(&experimental.NodeCountScaler{}). Watches(&corev1.Node{}, &nodeScalingHandler{r.Client}). Watches(&appsv1alpha1.Cluster{}, &clusterHandler{r.Client}). Complete(r) diff --git a/controllers/experimental/reconciler_scale_target_cluster.go b/controllers/experimental/reconciler_scale_target_cluster.go index 2e8e4f854d0..0b8063dfa5e 100644 --- a/controllers/experimental/reconciler_scale_target_cluster.go +++ b/controllers/experimental/reconciler_scale_target_cluster.go @@ -43,7 +43,7 @@ func (r *scaleTargetClusterReconciler) PreCondition(tree *kubebuilderx.ObjectTre } func (r *scaleTargetClusterReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) { - scaler, _ := tree.GetRoot().(*experimental.NodeAwareScaler) + scaler, _ := tree.GetRoot().(*experimental.NodeCountScaler) clusterKey := builder.NewClusterBuilder(scaler.Namespace, scaler.Spec.TargetClusterName).GetObject() object, err := tree.Get(clusterKey) if err != nil { diff --git a/controllers/experimental/reconciler_scale_target_cluster_test.go b/controllers/experimental/reconciler_scale_target_cluster_test.go index c8f2e8559a1..0c7e471a08f 100644 --- a/controllers/experimental/reconciler_scale_target_cluster_test.go +++ b/controllers/experimental/reconciler_scale_target_cluster_test.go @@ -47,10 +47,10 @@ var _ = Describe("scale target cluster reconciler test", func() { beforeReconcile := metav1.Now() newTree, err := reconciler.Reconcile(tree) Expect(err).Should(BeNil()) - newNAS, ok := 
newTree.GetRoot().(*experimentalv1alpha1.NodeAwareScaler) + newNCS, ok := newTree.GetRoot().(*experimentalv1alpha1.NodeCountScaler) Expect(ok).Should(BeTrue()) - Expect(newNAS.Status.LastScaleTime.Compare(beforeReconcile.Time)).Should(BeNumerically(">=", 0)) - object, err := newTree.Get(builder.NewClusterBuilder(newNAS.Namespace, newNAS.Spec.TargetClusterName).GetObject()) + Expect(newNCS.Status.LastScaleTime.Compare(beforeReconcile.Time)).Should(BeNumerically(">=", 0)) + object, err := newTree.Get(builder.NewClusterBuilder(newNCS.Namespace, newNCS.Spec.TargetClusterName).GetObject()) Expect(err).Should(BeNil()) newCluster, ok := object.(*appsv1alpha1.Cluster) Expect(ok).Should(BeTrue()) diff --git a/controllers/experimental/reconciler_update_status.go b/controllers/experimental/reconciler_update_status.go index ce19735a966..b72866e79d9 100644 --- a/controllers/experimental/reconciler_update_status.go +++ b/controllers/experimental/reconciler_update_status.go @@ -47,7 +47,7 @@ func (r *updateStatusReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *ku } func (r *updateStatusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) { - scaler, _ := tree.GetRoot().(*experimental.NodeAwareScaler) + scaler, _ := tree.GetRoot().(*experimental.NodeCountScaler) itsList := tree.List(&workloads.InstanceSet{}) nodes := tree.List(&corev1.Node{}) // TODO(free6om): filter nodes that satisfy pod template spec of each component (by nodeSelector, nodeAffinity&nodeAntiAffinity, tolerations) @@ -84,7 +84,7 @@ func (r *updateStatusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kube return tree, nil } -func buildScaleReadyCondition(scaler *experimental.NodeAwareScaler) *metav1.Condition { +func buildScaleReadyCondition(scaler *experimental.NodeCountScaler) *metav1.Condition { var ( ready = true notReadyNames []string diff --git a/controllers/experimental/reconciler_update_status_test.go 
b/controllers/experimental/reconciler_update_status_test.go index a9887525abe..93154daf45e 100644 --- a/controllers/experimental/reconciler_update_status_test.go +++ b/controllers/experimental/reconciler_update_status_test.go @@ -63,22 +63,22 @@ var _ = Describe("update status reconciler test", func() { Expect(reconciler.PreCondition(newTree)).Should(Equal(kubebuilderx.ResultSatisfied)) newTree, err = reconciler.Reconcile(tree) Expect(err).Should(BeNil()) - newNAS, ok := newTree.GetRoot().(*experimentalv1alpha1.NodeAwareScaler) + newNCS, ok := newTree.GetRoot().(*experimentalv1alpha1.NodeCountScaler) Expect(ok).Should(BeTrue()) - Expect(newNAS.Status.ComponentStatuses).Should(HaveLen(2)) - Expect(newNAS.Status.ComponentStatuses[0].CurrentReplicas).Should(Equal(desiredReplicas)) - Expect(newNAS.Status.ComponentStatuses[0].ReadyReplicas).Should(Equal(desiredReplicas)) - Expect(newNAS.Status.ComponentStatuses[0].AvailableReplicas).Should(Equal(desiredReplicas)) - Expect(newNAS.Status.ComponentStatuses[0].DesiredReplicas).Should(Equal(desiredReplicas)) - Expect(newNAS.Status.ComponentStatuses[1].CurrentReplicas).Should(Equal(desiredReplicas)) - Expect(newNAS.Status.ComponentStatuses[1].ReadyReplicas).Should(Equal(desiredReplicas)) - Expect(newNAS.Status.ComponentStatuses[1].AvailableReplicas).Should(Equal(desiredReplicas)) - Expect(newNAS.Status.ComponentStatuses[1].DesiredReplicas).Should(Equal(desiredReplicas)) - Expect(newNAS.Status.Conditions).Should(HaveLen(1)) - Expect(newNAS.Status.Conditions[0].Type).Should(BeEquivalentTo(experimentalv1alpha1.ScaleReady)) - Expect(newNAS.Status.Conditions[0].Status).Should(Equal(metav1.ConditionTrue)) - Expect(newNAS.Status.Conditions[0].Reason).Should(Equal(experimentalv1alpha1.ReasonReady)) - Expect(newNAS.Status.Conditions[0].Message).Should(Equal("scale ready")) + Expect(newNCS.Status.ComponentStatuses).Should(HaveLen(2)) + Expect(newNCS.Status.ComponentStatuses[0].CurrentReplicas).Should(Equal(desiredReplicas)) + 
Expect(newNCS.Status.ComponentStatuses[0].ReadyReplicas).Should(Equal(desiredReplicas)) + Expect(newNCS.Status.ComponentStatuses[0].AvailableReplicas).Should(Equal(desiredReplicas)) + Expect(newNCS.Status.ComponentStatuses[0].DesiredReplicas).Should(Equal(desiredReplicas)) + Expect(newNCS.Status.ComponentStatuses[1].CurrentReplicas).Should(Equal(desiredReplicas)) + Expect(newNCS.Status.ComponentStatuses[1].ReadyReplicas).Should(Equal(desiredReplicas)) + Expect(newNCS.Status.ComponentStatuses[1].AvailableReplicas).Should(Equal(desiredReplicas)) + Expect(newNCS.Status.ComponentStatuses[1].DesiredReplicas).Should(Equal(desiredReplicas)) + Expect(newNCS.Status.Conditions).Should(HaveLen(1)) + Expect(newNCS.Status.Conditions[0].Type).Should(BeEquivalentTo(experimentalv1alpha1.ScaleReady)) + Expect(newNCS.Status.Conditions[0].Status).Should(Equal(metav1.ConditionTrue)) + Expect(newNCS.Status.Conditions[0].Reason).Should(Equal(experimentalv1alpha1.ReasonReady)) + Expect(newNCS.Status.Conditions[0].Message).Should(Equal("scale ready")) }) }) }) diff --git a/controllers/experimental/suite_test.go b/controllers/experimental/suite_test.go index 59cbb88787a..e5484bd8fad 100644 --- a/controllers/experimental/suite_test.go +++ b/controllers/experimental/suite_test.go @@ -49,14 +49,14 @@ const ( ) var ( - tree *kubebuilderx.ObjectTree - nas *experimentalv1alpha1.NodeAwareScaler - clusterName = "foo" + tree *kubebuilderx.ObjectTree + ncs *experimentalv1alpha1.NodeCountScaler + clusterName = "foo" componentNames = []string{"bar-0", "bar-1"} ) func mockTestTree() *kubebuilderx.ObjectTree { - nas = builder.NewNodeAwareScalerBuilder(namespace, name). + ncs = builder.NewNodeCountScalerBuilder(namespace, name). SetTargetClusterName(clusterName). SetTargetComponentNames(componentNames). 
GetObject() @@ -86,7 +86,7 @@ func mockTestTree() *kubebuilderx.ObjectTree { } tree = kubebuilderx.NewObjectTree() - tree.SetRoot(nas) + tree.SetRoot(ncs) Expect(tree.Add(cluster, its0, its1, node0, node1)) return tree diff --git a/controllers/experimental/tree_loader.go b/controllers/experimental/tree_loader.go index 543e9a617b6..1b9cb35e669 100644 --- a/controllers/experimental/tree_loader.go +++ b/controllers/experimental/tree_loader.go @@ -39,7 +39,7 @@ import ( type treeLoader struct{} func (t *treeLoader) Load(ctx context.Context, reader client.Reader, req ctrl.Request, recorder record.EventRecorder, logger logr.Logger) (*kubebuilderx.ObjectTree, error) { - tree, err := kubebuilderx.ReadObjectTree[*experimental.NodeAwareScaler](ctx, reader, req, nil) + tree, err := kubebuilderx.ReadObjectTree[*experimental.NodeCountScaler](ctx, reader, req, nil) if err != nil { return nil, err } @@ -47,7 +47,7 @@ func (t *treeLoader) Load(ctx context.Context, reader client.Reader, req ctrl.Re if root == nil { return tree, nil } - scaler, _ := root.(*experimental.NodeAwareScaler) + scaler, _ := root.(*experimental.NodeCountScaler) key := types.NamespacedName{Namespace: scaler.Namespace, Name: scaler.Spec.TargetClusterName} cluster := &appsv1alpha1.Cluster{} if err = reader.Get(ctx, key, cluster); err != nil { diff --git a/controllers/experimental/tree_loader_test.go b/controllers/experimental/tree_loader_test.go index c07e291d4f0..7a6f6eb9185 100644 --- a/controllers/experimental/tree_loader_test.go +++ b/controllers/experimental/tree_loader_test.go @@ -50,7 +50,7 @@ var _ = Describe("tree loader test", func() { clusterName := "foo" componentNames := []string{"bar-0", "bar-1"} - root := builder.NewNodeAwareScalerBuilder(namespace, name).SetTargetClusterName(clusterName).SetTargetComponentNames(componentNames).GetObject() + root := builder.NewNodeCountScalerBuilder(namespace, name).SetTargetClusterName(clusterName).SetTargetComponentNames(componentNames).GetObject() cluster := 
builder.NewClusterBuilder(namespace, clusterName).GetObject() its0 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[0])).GetObject() its1 := builder.NewInstanceSetBuilder(namespace, constant.GenerateClusterComponentName(clusterName, componentNames[1])).GetObject() @@ -68,8 +68,8 @@ var _ = Describe("tree loader test", func() { } k8sMock.EXPECT(). - Get(gomock.Any(), gomock.Any(), &experimental.NodeAwareScaler{}, gomock.Any()). - DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *experimental.NodeAwareScaler, _ ...client.GetOption) error { + Get(gomock.Any(), gomock.Any(), &experimental.NodeCountScaler{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *experimental.NodeCountScaler, _ ...client.GetOption) error { *obj = *root return nil }).Times(1) diff --git a/deploy/helm/config/rbac/role.yaml b/deploy/helm/config/rbac/role.yaml index e3218c245bd..67c97244b19 100644 --- a/deploy/helm/config/rbac/role.yaml +++ b/deploy/helm/config/rbac/role.yaml @@ -790,7 +790,7 @@ rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers + - nodecountscalers verbs: - create - delete @@ -802,13 +802,13 @@ rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers/finalizers + - nodecountscalers/finalizers verbs: - update - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers/status + - nodecountscalers/status verbs: - get - patch diff --git a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml b/deploy/helm/crds/experimental.kubeblocks.io_nodecountscalers.yaml similarity index 93% rename from deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml rename to deploy/helm/crds/experimental.kubeblocks.io_nodecountscalers.yaml index 37253f5b894..73eba8e34ac 100644 --- a/deploy/helm/crds/experimental.kubeblocks.io_nodeawarescalers.yaml +++ 
b/deploy/helm/crds/experimental.kubeblocks.io_nodecountscalers.yaml @@ -5,19 +5,19 @@ metadata: controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks - name: nodeawarescalers.experimental.kubeblocks.io + name: nodecountscalers.experimental.kubeblocks.io spec: group: experimental.kubeblocks.io names: categories: - kubeblocks - all - kind: NodeAwareScaler - listKind: NodeAwareScalerList - plural: nodeawarescalers + kind: NodeCountScaler + listKind: NodeCountScalerList + plural: nodecountscalers shortNames: - - nas - singular: nodeawarescaler + - ncs + singular: nodecountscaler scope: Namespaced versions: - additionalPrinterColumns: @@ -43,7 +43,7 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: NodeAwareScaler is the Schema for the nodeawarescalers API + description: NodeCountScaler is the Schema for the nodecountscalers API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -58,7 +58,7 @@ spec: metadata: type: object spec: - description: NodeAwareScalerSpec defines the desired state of NodeAwareScaler + description: NodeCountScalerSpec defines the desired state of NodeCountScaler properties: targetClusterName: description: Specified the target Cluster name this scaler applies @@ -74,11 +74,11 @@ spec: - targetClusterName type: object status: - description: NodeAwareScalerStatus defines the observed state of NodeAwareScaler + description: NodeCountScalerStatus defines the observed state of NodeCountScaler properties: componentStatuses: description: Records the current status information of all Components - specified in the NodeAwareScalerSpec. + specified in the NodeCountScalerSpec. items: properties: availableReplicas: @@ -113,7 +113,7 @@ spec: type: object type: array conditions: - description: 'Represents the latest available observations of a nodeawarescaler''s + description: 'Represents the latest available observations of a nodecountscaler''s current state. 
Known .status.conditions.type are: "ScaleReady". ScaleReady - All target components are ready.' items: @@ -187,7 +187,7 @@ spec: - type x-kubernetes-list-type: map lastScaleTime: - description: LastScaleTime is the last time the NodeAwareScaler scaled + description: LastScaleTime is the last time the NodeCountScaler scaled the number of instances. format: date-time type: string diff --git a/deploy/helm/templates/rbac/experimental_nodeawarescaler_editor_role.yaml b/deploy/helm/templates/rbac/experimental_nodecountscaler_editor_role.yaml similarity index 68% rename from deploy/helm/templates/rbac/experimental_nodeawarescaler_editor_role.yaml rename to deploy/helm/templates/rbac/experimental_nodecountscaler_editor_role.yaml index 1466c26f1d2..c5c57d1df4e 100644 --- a/deploy/helm/templates/rbac/experimental_nodeawarescaler_editor_role.yaml +++ b/deploy/helm/templates/rbac/experimental_nodecountscaler_editor_role.yaml @@ -1,15 +1,15 @@ -# permissions for end users to edit nodeawarescalers. +# permissions for end users to edit nodecountscalers. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: {{- include "kubeblocks.labels" . | nindent 4 }} - name: {{ include "kubeblocks.fullname" . }}-nodeawarescaler-editor-role + name: {{ include "kubeblocks.fullname" . 
}}-nodecountscaler-editor-role rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers + - nodecountscalers verbs: - create - delete @@ -21,6 +21,6 @@ rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers/status + - nodecountscalers/status verbs: - get diff --git a/deploy/helm/templates/rbac/experimental_nodeawarescaler_viewer_role.yaml b/deploy/helm/templates/rbac/experimental_nodecountscaler_viewer_role.yaml similarity index 65% rename from deploy/helm/templates/rbac/experimental_nodeawarescaler_viewer_role.yaml rename to deploy/helm/templates/rbac/experimental_nodecountscaler_viewer_role.yaml index 4cc51589760..a129b2b0154 100644 --- a/deploy/helm/templates/rbac/experimental_nodeawarescaler_viewer_role.yaml +++ b/deploy/helm/templates/rbac/experimental_nodecountscaler_viewer_role.yaml @@ -1,15 +1,15 @@ -# permissions for end users to view nodeawarescalers. +# permissions for end users to view nodecountscalers. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: {{- include "kubeblocks.labels" . | nindent 4 }} - name: {{ include "kubeblocks.fullname" . }}-nodeawarescaler-viewer-role + name: {{ include "kubeblocks.fullname" . 
}}-nodecountscaler-viewer-role rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers + - nodecountscalers verbs: - get - list @@ -17,6 +17,6 @@ rules: - apiGroups: - experimental.kubeblocks.io resources: - - nodeawarescalers/status + - nodecountscalers/status verbs: - get diff --git a/pkg/controller/builder/builder_node_aware_scaler.go b/pkg/controller/builder/builder_node_count_scaler.go similarity index 65% rename from pkg/controller/builder/builder_node_aware_scaler.go rename to pkg/controller/builder/builder_node_count_scaler.go index d20e3a032d9..b5b2e208113 100644 --- a/pkg/controller/builder/builder_node_aware_scaler.go +++ b/pkg/controller/builder/builder_node_count_scaler.go @@ -23,22 +23,22 @@ import ( experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" ) -type NodeAwareScalerBuilder struct { - BaseBuilder[experimental.NodeAwareScaler, *experimental.NodeAwareScaler, NodeAwareScalerBuilder] +type NodeCountScalerBuilder struct { + BaseBuilder[experimental.NodeCountScaler, *experimental.NodeCountScaler, NodeCountScalerBuilder] } -func NewNodeAwareScalerBuilder(namespace, name string) *NodeAwareScalerBuilder { - builder := &NodeAwareScalerBuilder{} - builder.init(namespace, name, &experimental.NodeAwareScaler{}, builder) +func NewNodeCountScalerBuilder(namespace, name string) *NodeCountScalerBuilder { + builder := &NodeCountScalerBuilder{} + builder.init(namespace, name, &experimental.NodeCountScaler{}, builder) return builder } -func (builder *NodeAwareScalerBuilder) SetTargetClusterName(clusterName string) *NodeAwareScalerBuilder { +func (builder *NodeCountScalerBuilder) SetTargetClusterName(clusterName string) *NodeCountScalerBuilder { builder.get().Spec.TargetClusterName = clusterName return builder } -func (builder *NodeAwareScalerBuilder) SetTargetComponentNames(componentNames []string) *NodeAwareScalerBuilder { +func (builder *NodeCountScalerBuilder) SetTargetComponentNames(componentNames []string) 
*NodeCountScalerBuilder { builder.get().Spec.TargetComponentNames = componentNames return builder } diff --git a/pkg/controller/builder/builder_node_aware_scaler_test.go b/pkg/controller/builder/builder_node_count_scaler_test.go similarity index 77% rename from pkg/controller/builder/builder_node_aware_scaler_test.go rename to pkg/controller/builder/builder_node_count_scaler_test.go index 3e49320dc72..45bbc2d48d6 100644 --- a/pkg/controller/builder/builder_node_aware_scaler_test.go +++ b/pkg/controller/builder/builder_node_count_scaler_test.go @@ -24,7 +24,7 @@ import ( . "github.com/onsi/gomega" ) -var _ = Describe("node_aware_scaler builder", func() { +var _ = Describe("node_count_scaler builder", func() { It("should work well", func() { const ( name = "foo" @@ -33,14 +33,14 @@ var _ = Describe("node_aware_scaler builder", func() { clusterName := "target-cluster-name" componentNames := []string{"comp-1", "comp-2"} - nas := NewNodeAwareScalerBuilder(ns, name). + ncs := NewNodeCountScalerBuilder(ns, name). SetTargetClusterName(clusterName). SetTargetComponentNames(componentNames). 
GetObject() - Expect(nas.Name).Should(Equal(name)) - Expect(nas.Namespace).Should(Equal(ns)) - Expect(nas.Spec.TargetClusterName).Should(Equal(clusterName)) - Expect(nas.Spec.TargetComponentNames).Should(Equal(componentNames)) + Expect(ncs.Name).Should(Equal(name)) + Expect(ncs.Namespace).Should(Equal(ns)) + Expect(ncs.Spec.TargetClusterName).Should(Equal(clusterName)) + Expect(ncs.Spec.TargetComponentNames).Should(Equal(componentNames)) }) }) From d4877e826d22429073aac1bd76dd250fe26e2a72 Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 17:35:15 +0800 Subject: [PATCH 13/14] fix lint error --- controllers/experimental/suite_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/controllers/experimental/suite_test.go b/controllers/experimental/suite_test.go index e5484bd8fad..fe706753c9a 100644 --- a/controllers/experimental/suite_test.go +++ b/controllers/experimental/suite_test.go @@ -49,9 +49,9 @@ const ( ) var ( - tree *kubebuilderx.ObjectTree - ncs *experimentalv1alpha1.NodeCountScaler - clusterName = "foo" + tree *kubebuilderx.ObjectTree + ncs *experimentalv1alpha1.NodeCountScaler + clusterName = "foo" componentNames = []string{"bar-0", "bar-1"} ) From 07f16ba433661d4c7bd0eae27f2689e21344288c Mon Sep 17 00:00:00 2001 From: free6om Date: Tue, 7 May 2024 18:29:45 +0800 Subject: [PATCH 14/14] move MergeList from pkg instanceset to controllerutil --- .../experimental/reconciler_update_status.go | 4 +- .../instanceset/in_place_update_util.go | 3 +- pkg/controller/instanceset/instance_util.go | 13 ++-- pkg/controller/instanceset/utils.go | 16 ----- pkg/controller/instanceset/utils_test.go | 57 ----------------- pkg/controllerutil/util.go | 16 +++++ pkg/controllerutil/util_test.go | 63 +++++++++++++++++++ 7 files changed, 90 insertions(+), 82 deletions(-) diff --git a/controllers/experimental/reconciler_update_status.go b/controllers/experimental/reconciler_update_status.go index b72866e79d9..6c5c4c51e68 100644 --- 
a/controllers/experimental/reconciler_update_status.go +++ b/controllers/experimental/reconciler_update_status.go @@ -32,9 +32,9 @@ import ( experimental "github.com/apecloud/kubeblocks/apis/experimental/v1alpha1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/controller/instanceset" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" "github.com/apecloud/kubeblocks/pkg/controller/model" + intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) type updateStatusReconciler struct{} @@ -71,7 +71,7 @@ func (r *updateStatusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kube } statusList = append(statusList, status) } - instanceset.MergeList(&statusList, &scaler.Status.ComponentStatuses, + intctrlutil.MergeList(&statusList, &scaler.Status.ComponentStatuses, func(item experimental.ComponentStatus) func(experimental.ComponentStatus) bool { return func(status experimental.ComponentStatus) bool { return item.Name == status.Name diff --git a/pkg/controller/instanceset/in_place_update_util.go b/pkg/controller/instanceset/in_place_update_util.go index 1476cda01df..868c9a7c156 100644 --- a/pkg/controller/instanceset/in_place_update_util.go +++ b/pkg/controller/instanceset/in_place_update_util.go @@ -33,6 +33,7 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" + intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" "github.com/apecloud/kubeblocks/pkg/dataprotection/utils" viper "github.com/apecloud/kubeblocks/pkg/viperx" ) @@ -131,7 +132,7 @@ func mergeInPlaceFields(src, dst *corev1.Pod) { dst.Spec.ActiveDeadlineSeconds = src.Spec.ActiveDeadlineSeconds // according to the Pod API spec, tolerations can only be appended. // means old tolerations must be in new toleration list. 
- MergeList(&src.Spec.Tolerations, &dst.Spec.Tolerations, func(item corev1.Toleration) func(corev1.Toleration) bool { + intctrlutil.MergeList(&src.Spec.Tolerations, &dst.Spec.Tolerations, func(item corev1.Toleration) func(corev1.Toleration) bool { return func(t corev1.Toleration) bool { return reflect.DeepEqual(item, t) } diff --git a/pkg/controller/instanceset/instance_util.go b/pkg/controller/instanceset/instance_util.go index 4727498c47a..def29a6691c 100644 --- a/pkg/controller/instanceset/instance_util.go +++ b/pkg/controller/instanceset/instance_util.go @@ -43,6 +43,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/builder" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" "github.com/apecloud/kubeblocks/pkg/controller/model" + intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) type InstanceTemplate interface { @@ -322,7 +323,7 @@ func buildInstanceByTemplate(name string, template *instanceTemplateExt, parent }).GetObject() volumeList = append(volumeList, *volume) } - MergeList(&volumeList, &pod.Spec.Volumes, func(item corev1.Volume) func(corev1.Volume) bool { + intctrlutil.MergeList(&volumeList, &pod.Spec.Volumes, func(item corev1.Volume) func(corev1.Volume) bool { return func(v corev1.Volume) bool { return v.Name == item.Name } @@ -602,7 +603,7 @@ func buildInstanceTemplateExt(template workloads.InstanceTemplate, templateExt * mergeCPUNMemory(&src.Requests, &dst.Requests) } if template.Env != nil { - MergeList(&template.Env, &templateExt.Spec.Containers[0].Env, + intctrlutil.MergeList(&template.Env, &templateExt.Spec.Containers[0].Env, func(item corev1.EnvVar) func(corev1.EnvVar) bool { return func(env corev1.EnvVar) bool { return env.Name == item.Name @@ -610,25 +611,25 @@ func buildInstanceTemplateExt(template workloads.InstanceTemplate, templateExt * }) } } - MergeList(&template.Tolerations, &templateExt.Spec.Tolerations, + intctrlutil.MergeList(&template.Tolerations, &templateExt.Spec.Tolerations, func(item 
corev1.Toleration) func(corev1.Toleration) bool { return func(t corev1.Toleration) bool { return reflect.DeepEqual(item, t) } }) - MergeList(&template.Volumes, &templateExt.Spec.Volumes, + intctrlutil.MergeList(&template.Volumes, &templateExt.Spec.Volumes, func(item corev1.Volume) func(corev1.Volume) bool { return func(v corev1.Volume) bool { return v.Name == item.Name } }) - MergeList(&template.VolumeMounts, &templateExt.Spec.Containers[0].VolumeMounts, + intctrlutil.MergeList(&template.VolumeMounts, &templateExt.Spec.Containers[0].VolumeMounts, func(item corev1.VolumeMount) func(corev1.VolumeMount) bool { return func(vm corev1.VolumeMount) bool { return vm.Name == item.Name } }) - MergeList(&template.VolumeClaimTemplates, &templateExt.VolumeClaimTemplates, + intctrlutil.MergeList(&template.VolumeClaimTemplates, &templateExt.VolumeClaimTemplates, func(item corev1.PersistentVolumeClaim) func(corev1.PersistentVolumeClaim) bool { return func(claim corev1.PersistentVolumeClaim) bool { return claim.Name == item.Name diff --git a/pkg/controller/instanceset/utils.go b/pkg/controller/instanceset/utils.go index 3697ac568a0..0a45f2f60b4 100644 --- a/pkg/controller/instanceset/utils.go +++ b/pkg/controller/instanceset/utils.go @@ -23,7 +23,6 @@ import ( "fmt" "strings" - "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" @@ -197,21 +196,6 @@ func mergeMap[K comparable, V any](src, dst *map[K]V) { } } -func MergeList[E any](src, dst *[]E, f func(E) func(E) bool) { - if len(*src) == 0 { - return - } - for i := range *src { - item := (*src)[i] - index := slices.IndexFunc(*dst, f(item)) - if index >= 0 { - (*dst)[index] = item - } else { - *dst = append(*dst, item) - } - } -} - func getMatchLabels(name string) map[string]string { return map[string]string{ WorkloadsManagedByLabelKey: workloads.Kind, diff --git a/pkg/controller/instanceset/utils_test.go b/pkg/controller/instanceset/utils_test.go index 
61ca3dbe69d..e3e6acacfda 100644 --- a/pkg/controller/instanceset/utils_test.go +++ b/pkg/controller/instanceset/utils_test.go @@ -25,7 +25,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" "github.com/apecloud/kubeblocks/pkg/controller/builder" @@ -40,62 +39,6 @@ var _ = Describe("utils test", func() { priorityMap = ComposeRolePriorityMap(its.Spec.Roles) }) - Context("MergeList", func() { - It("should work well", func() { - src := []corev1.Volume{ - { - Name: "pvc1", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc1-pod-0", - }, - }, - }, - { - Name: "pvc2", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc2-pod-0", - }, - }, - }, - } - dst := []corev1.Volume{ - { - Name: "pvc0", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc0-pod-0", - }, - }, - }, - { - Name: "pvc1", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc-pod-0", - }, - }, - }, - } - MergeList(&src, &dst, func(v corev1.Volume) func(corev1.Volume) bool { - return func(volume corev1.Volume) bool { - return v.Name == volume.Name - } - }) - - Expect(dst).Should(HaveLen(3)) - slices.SortStableFunc(dst, func(a, b corev1.Volume) bool { - return a.Name < b.Name - }) - Expect(dst[0].Name).Should(Equal("pvc0")) - Expect(dst[1].Name).Should(Equal("pvc1")) - Expect(dst[1].PersistentVolumeClaim).ShouldNot(BeNil()) - Expect(dst[1].PersistentVolumeClaim.ClaimName).Should(Equal("pvc1-pod-0")) - Expect(dst[2].Name).Should(Equal("pvc2")) - }) - }) - Context("mergeMap", func() { It("should work well", func() { src := map[string]string{ diff --git a/pkg/controllerutil/util.go b/pkg/controllerutil/util.go index d56c81bef08..670fefd63c2 100644 --- 
a/pkg/controllerutil/util.go +++ b/pkg/controllerutil/util.go @@ -23,6 +23,7 @@ import ( "context" "reflect" + "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -183,3 +184,18 @@ func DeleteOwnedResources[T generics.Object, PT generics.PObject[T], L generics. } return nil } + +func MergeList[E any](src, dst *[]E, f func(E) func(E) bool) { + if len(*src) == 0 { + return + } + for i := range *src { + item := (*src)[i] + index := slices.IndexFunc(*dst, f(item)) + if index >= 0 { + (*dst)[index] = item + } else { + *dst = append(*dst, item) + } + } +} diff --git a/pkg/controllerutil/util_test.go b/pkg/controllerutil/util_test.go index 946dcfa434b..40093458320 100644 --- a/pkg/controllerutil/util_test.go +++ b/pkg/controllerutil/util_test.go @@ -23,9 +23,72 @@ import ( "context" "testing" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "golang.org/x/exp/slices" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" ) +var _ = Describe("utils test", func() { + Context("MergeList", func() { + It("should work well", func() { + src := []corev1.Volume{ + { + Name: "pvc1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc1-pod-0", + }, + }, + }, + { + Name: "pvc2", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc2-pod-0", + }, + }, + }, + } + dst := []corev1.Volume{ + { + Name: "pvc0", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc0-pod-0", + }, + }, + }, + { + Name: "pvc1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-pod-0", + }, + }, + }, + } + MergeList(&src, &dst, func(v corev1.Volume) func(corev1.Volume) bool { + return func(volume corev1.Volume) bool { + return 
v.Name == volume.Name + } + }) + + Expect(dst).Should(HaveLen(3)) + slices.SortStableFunc(dst, func(a, b corev1.Volume) bool { + return a.Name < b.Name + }) + Expect(dst[0].Name).Should(Equal("pvc0")) + Expect(dst[1].Name).Should(Equal("pvc1")) + Expect(dst[1].PersistentVolumeClaim).ShouldNot(BeNil()) + Expect(dst[1].PersistentVolumeClaim.ClaimName).Should(Equal("pvc1-pod-0")) + Expect(dst[2].Name).Should(Equal("pvc2")) + }) + }) +}) + func TestGetUncachedObjects(t *testing.T) { GetUncachedObjects() }