From c29cb95bbc2788b99f5443d3324eef874b788d41 Mon Sep 17 00:00:00 2001 From: Ismail Alidzhikov Date: Thu, 6 Jun 2024 11:21:30 +0300 Subject: [PATCH 1/2] vpa-admission-controller: Wire contexts --- .../pkg/admission-controller/logic/server.go | 19 +++++++----- .../admission-controller/resource/handler.go | 4 ++- .../resource/pod/handler.go | 9 +++--- .../resource/pod/handler_test.go | 5 +-- .../resource/vpa/handler.go | 3 +- .../resource/vpa/matcher.go | 10 +++--- .../resource/vpa/matcher_test.go | 3 +- .../pkg/recommender/input/cluster_feeder.go | 20 ++++++------ .../recommender/input/cluster_feeder_test.go | 7 +++-- .../pkg/recommender/model/cluster.go | 17 +++++----- .../pkg/recommender/model/cluster_test.go | 31 ++++++++++--------- .../pkg/recommender/routines/recommender.go | 4 +-- .../controller_fetcher/controller_fetcher.go | 26 ++++++++-------- .../controller_fetcher_fake.go | 4 ++- .../controller_fetcher_test.go | 10 ++++-- vertical-pod-autoscaler/pkg/target/fetcher.go | 10 +++--- .../pkg/target/mock/fetcher_mock.go | 4 ++- .../pkg/updater/logic/updater.go | 4 +-- vertical-pod-autoscaler/pkg/utils/vpa/api.go | 4 +-- .../pkg/utils/vpa/api_test.go | 9 ++++-- 20 files changed, 115 insertions(+), 88 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index 6fb0093467f6..92f9d3893da4 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -17,12 +17,13 @@ limitations under the License. package logic import ( + "context" "encoding/json" "fmt" "io" "net/http" - "k8s.io/api/admission/v1" + admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod" @@ -56,12 +57,12 @@ func (s *AdmissionServer) RegisterResourceHandler(resourceHandler resource.Handl s.resourceHandlers[resourceHandler.GroupResource()] = resourceHandler } -func (s *AdmissionServer) admit(data []byte) (*v1.AdmissionResponse, metrics_admission.AdmissionStatus, metrics_admission.AdmissionResource) { +func (s *AdmissionServer) admit(ctx context.Context, data []byte) (*admissionv1.AdmissionResponse, metrics_admission.AdmissionStatus, metrics_admission.AdmissionResource) { // we don't block the admission by default, even on unparsable JSON - response := v1.AdmissionResponse{} + response := admissionv1.AdmissionResponse{} response.Allowed = true - ar := v1.AdmissionReview{} + ar := admissionv1.AdmissionReview{} if err := json.Unmarshal(data, &ar); err != nil { klog.Error(err) return &response, metrics_admission.Error, metrics_admission.Unknown @@ -80,7 +81,7 @@ func (s *AdmissionServer) admit(data []byte) (*v1.AdmissionResponse, metrics_adm handler, ok := s.resourceHandlers[admittedGroupResource] if ok { - patches, err = handler.GetPatches(ar.Request) + patches, err = handler.GetPatches(ctx, ar.Request) resource = handler.AdmissionResource() if handler.DisallowIncorrectObjects() && err != nil { @@ -106,7 +107,7 @@ func (s *AdmissionServer) admit(data []byte) (*v1.AdmissionResponse, metrics_adm klog.Errorf("Cannot marshal the patch %v: %v", patches, err) return &response, metrics_admission.Error, resource } - patchType := v1.PatchTypeJSONPatch + patchType := admissionv1.PatchTypeJSONPatch response.PatchType = &patchType response.Patch = patch 
klog.V(4).Infof("Sending patches: %v", patches) @@ -127,6 +128,8 @@ func (s *AdmissionServer) admit(data []byte) (*v1.AdmissionResponse, metrics_adm // Serve is a handler function of AdmissionServer func (s *AdmissionServer) Serve(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + executionTimer := metrics_admission.NewExecutionTimer() defer executionTimer.ObserveTotal() admissionLatency := metrics_admission.NewAdmissionLatency() @@ -146,8 +149,8 @@ func (s *AdmissionServer) Serve(w http.ResponseWriter, r *http.Request) { } executionTimer.ObserveStep("read_request") - reviewResponse, status, resource := s.admit(body) - ar := v1.AdmissionReview{ + reviewResponse, status, resource := s.admit(ctx, body) + ar := admissionv1.AdmissionReview{ Response: reviewResponse, TypeMeta: metav1.TypeMeta{ Kind: "AdmissionReview", diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/handler.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/handler.go index 522c36359fac..0b1bbc03c41b 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/handler.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/handler.go @@ -17,6 +17,8 @@ limitations under the License. package resource import ( + "context" + v1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/admission" @@ -38,5 +40,5 @@ type Handler interface { // DisallowIncorrectObjects returns whether incorrect objects (eg. unparsable, not passing validations) should be disallowed by Admission Server. DisallowIncorrectObjects() bool // GetPatches returns patches for given AdmissionRequest - GetPatches(*v1.AdmissionRequest) ([]PatchRecord, error) + GetPatches(context.Context, *v1.AdmissionRequest) ([]PatchRecord, error) } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler.go index 20d7549e5c34..14e8e4324f2e 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler.go @@ -17,11 +17,12 @@ limitations under the License. package pod import ( + "context" "encoding/json" "fmt" admissionv1 "k8s.io/api/admission/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" resource_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch" @@ -63,12 +64,12 @@ func (h *resourceHandler) DisallowIncorrectObjects() bool { } // GetPatches builds patches for Pod in given admission request. 
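
Threading starts at the HTTP boundary: Serve captures r.Context() and hands it to admit, which forwards it into the matching handler's GetPatches. The point of plumbing r.Context() rather than context.Background() is that it is cancelled when the API server abandons the webhook call, so downstream lookups can stop early. A minimal self-contained sketch of the same shape — fetchSelector and the route are illustrative stand-ins, not code from this patch:

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "time"
    )

    // fetchSelector stands in for any downstream call that should stop
    // when the caller goes away.
    func fetchSelector(ctx context.Context) (string, error) {
        select {
        case <-time.After(100 * time.Millisecond): // simulated API latency
            return "app=example", nil
        case <-ctx.Done(): // request cancelled or timed out
            return "", ctx.Err()
        }
    }

    func serve(w http.ResponseWriter, r *http.Request) {
        // r.Context() is cancelled when the client drops the connection,
        // so everything below inherits the request's lifetime.
        selector, err := fetchSelector(r.Context())
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        fmt.Fprintln(w, selector)
    }

    func main() {
        http.HandleFunc("/admit", serve)
        _ = http.ListenAndServe(":8080", nil)
    }
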
-func (h *resourceHandler) GetPatches(ar *admissionv1.AdmissionRequest) ([]resource_admission.PatchRecord, error) { +func (h *resourceHandler) GetPatches(ctx context.Context, ar *admissionv1.AdmissionRequest) ([]resource_admission.PatchRecord, error) { if ar.Resource.Version != "v1" { return nil, fmt.Errorf("only v1 Pods are supported") } raw, namespace := ar.Object.Raw, ar.Namespace - pod := v1.Pod{} + pod := corev1.Pod{} if err := json.Unmarshal(raw, &pod); err != nil { return nil, err } @@ -77,7 +78,7 @@ func (h *resourceHandler) GetPatches(ar *admissionv1.AdmissionRequest) ([]resour pod.Namespace = namespace } klog.V(4).Infof("Admitting pod %s", klog.KObj(&pod)) - controllingVpa := h.vpaMatcher.GetMatchingVPA(&pod) + controllingVpa := h.vpaMatcher.GetMatchingVPA(ctx, &pod) if controllingVpa == nil { klog.V(4).Infof("No matching VPA found for pod %s", klog.KObj(&pod)) return []resource_admission.PatchRecord{}, nil diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler_test.go index d00e31106182..a82c723b96c8 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler_test.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "fmt" "testing" @@ -43,7 +44,7 @@ type fakeVpaMatcher struct { vpa *vpa_types.VerticalPodAutoscaler } -func (m *fakeVpaMatcher) GetMatchingVPA(_ *apiv1.Pod) *vpa_types.VerticalPodAutoscaler { +func (m *fakeVpaMatcher) GetMatchingVPA(_ context.Context, _ *apiv1.Pod) *vpa_types.VerticalPodAutoscaler { return m.vpa } @@ -176,7 +177,7 @@ func TestGetPatches(t *testing.T) { fppp := &fakePodPreProcessor{tc.podPreProcessorError} fvm := &fakeVpaMatcher{vpa: tc.vpa} h := NewResourceHandler(fppp, fvm, tc.calculators) - patches, err := h.GetPatches(&admissionv1.AdmissionRequest{ + patches, err := h.GetPatches(context.Background(), &admissionv1.AdmissionRequest{ Resource: v1.GroupVersionResource{ Version: "v1", }, diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler.go index 59927edbbe53..39ccdeccfe40 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler.go @@ -17,6 +17,7 @@ limitations under the License. package vpa import ( + "context" "encoding/json" "fmt" @@ -71,7 +72,7 @@ func (h *resourceHandler) DisallowIncorrectObjects() bool { } // GetPatches builds patches for VPA in given admission request. -func (h *resourceHandler) GetPatches(ar *v1.AdmissionRequest) ([]resource.PatchRecord, error) { +func (h *resourceHandler) GetPatches(_ context.Context, ar *v1.AdmissionRequest) ([]resource.PatchRecord, error) { raw, isCreate := ar.Object.Raw, ar.Operation == v1.Create vpa, err := parseVPA(raw) if err != nil { diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher.go index cee7c97859ff..83ba5c11abf7 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher.go @@ -17,6 +17,8 @@ limitations under the License. 
package vpa import ( + "context" + core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/klog/v2" @@ -31,7 +33,7 @@ import ( // Matcher is capable of returning a single matching VPA object // for a pod. Will return nil if no matching object is found. type Matcher interface { - GetMatchingVPA(pod *core.Pod) *vpa_types.VerticalPodAutoscaler + GetMatchingVPA(ctx context.Context, pod *core.Pod) *vpa_types.VerticalPodAutoscaler } type matcher struct { @@ -49,7 +51,7 @@ func NewMatcher(vpaLister vpa_lister.VerticalPodAutoscalerLister, controllerFetcher: controllerFetcher} } -func (m *matcher) GetMatchingVPA(pod *core.Pod) *vpa_types.VerticalPodAutoscaler { +func (m *matcher) GetMatchingVPA(ctx context.Context, pod *core.Pod) *vpa_types.VerticalPodAutoscaler { configs, err := m.vpaLister.VerticalPodAutoscalers(pod.Namespace).List(labels.Everything()) if err != nil { klog.Errorf("failed to get vpa configs: %v", err) @@ -60,7 +62,7 @@ func (m *matcher) GetMatchingVPA(pod *core.Pod) *vpa_types.VerticalPodAutoscaler if vpa_api_util.GetUpdateMode(vpaConfig) == vpa_types.UpdateModeOff { continue } - selector, err := m.selectorFetcher.Fetch(vpaConfig) + selector, err := m.selectorFetcher.Fetch(ctx, vpaConfig) if err != nil { klog.V(3).Infof("skipping VPA object %s because we cannot fetch selector: %s", klog.KObj(vpaConfig), err) continue @@ -71,7 +73,7 @@ func (m *matcher) GetMatchingVPA(pod *core.Pod) *vpa_types.VerticalPodAutoscaler }) } klog.V(2).Infof("Let's choose from %d configs for pod %s", len(onConfigs), klog.KObj(pod)) - result := vpa_api_util.GetControllingVPAForPod(pod, onConfigs, m.controllerFetcher) + result := vpa_api_util.GetControllingVPAForPod(ctx, pod, onConfigs, m.controllerFetcher) if result != nil { return result.Vpa } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher_test.go index 035f64ed2079..ce149e3f48d0 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher_test.go @@ -17,6 +17,7 @@ limitations under the License. package vpa import ( + "context" "testing" appsv1 "k8s.io/api/apps/v1" @@ -148,7 +149,7 @@ func TestGetMatchingVpa(t *testing.T) { // The hierarchy part is being test in the "TestControllerFetcher" test. matcher := NewMatcher(vpaLister, mockSelectorFetcher, controllerfetcher.FakeControllerFetcher{}) - vpa := matcher.GetMatchingVPA(tc.pod) + vpa := matcher.GetMatchingVPA(context.Background(), tc.pod) if tc.expectedFound && assert.NotNil(t, vpa) { assert.Equal(t, tc.expectedVpaName, vpa.Name) } else { diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go index 9dd5da9df3b5..8b3851cfc894 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go @@ -62,7 +62,7 @@ type ClusterStateFeeder interface { InitFromCheckpoints() // LoadVPAs updates clusterState with current state of VPAs. - LoadVPAs() + LoadVPAs(ctx context.Context) // LoadPods updates clusterState with current specification of Pods and their Containers. 
LoadPods() @@ -243,7 +243,7 @@ func (feeder *clusterStateFeeder) setVpaCheckpoint(checkpoint *vpa_types.Vertica func (feeder *clusterStateFeeder) InitFromCheckpoints() { klog.V(3).Info("Initializing VPA from checkpoints") - feeder.LoadVPAs() + feeder.LoadVPAs(context.TODO()) namespaces := make(map[string]bool) for _, v := range feeder.clusterState.Vpas { @@ -270,7 +270,7 @@ func (feeder *clusterStateFeeder) InitFromCheckpoints() { func (feeder *clusterStateFeeder) GarbageCollectCheckpoints() { klog.V(3).Info("Starting garbage collection of checkpoints") - feeder.LoadVPAs() + feeder.LoadVPAs(context.TODO()) namespaceList, err := feeder.coreClient.Namespaces().List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -338,7 +338,7 @@ func filterVPAs(feeder *clusterStateFeeder, allVpaCRDs []*vpa_types.VerticalPodA } // LoadVPAs fetches VPA objects and loads them into the cluster state. -func (feeder *clusterStateFeeder) LoadVPAs() { +func (feeder *clusterStateFeeder) LoadVPAs(ctx context.Context) { // List VPA API objects. allVpaCRDs, err := feeder.vpaLister.List(labels.Everything()) if err != nil { @@ -358,7 +358,7 @@ func (feeder *clusterStateFeeder) LoadVPAs() { VpaName: vpaCRD.Name, } - selector, conditions := feeder.getSelector(vpaCRD) + selector, conditions := feeder.getSelector(ctx, vpaCRD) klog.V(4).Infof("Using selector %s for VPA %s", selector.String(), klog.KObj(vpaCRD)) if feeder.clusterState.AddOrUpdateVpa(vpaCRD, selector) == nil { @@ -486,7 +486,7 @@ type condition struct { message string } -func (feeder *clusterStateFeeder) validateTargetRef(vpa *vpa_types.VerticalPodAutoscaler) (bool, condition) { +func (feeder *clusterStateFeeder) validateTargetRef(ctx context.Context, vpa *vpa_types.VerticalPodAutoscaler) (bool, condition) { // if vpa.Spec.TargetRef == nil { return false, condition{} @@ -499,7 +499,7 @@ func (feeder *clusterStateFeeder) validateTargetRef(vpa *vpa_types.VerticalPodAu }, ApiVersion: vpa.Spec.TargetRef.APIVersion, } - top, err := feeder.controllerFetcher.FindTopMostWellKnownOrScalable(&k) + top, err := feeder.controllerFetcher.FindTopMostWellKnownOrScalable(ctx, &k) if err != nil { return false, condition{conditionType: vpa_types.ConfigUnsupported, delete: false, message: fmt.Sprintf("Error checking if target is a topmost well-known or scalable controller: %s", err)} } @@ -512,10 +512,10 @@ func (feeder *clusterStateFeeder) validateTargetRef(vpa *vpa_types.VerticalPodAu return true, condition{} } -func (feeder *clusterStateFeeder) getSelector(vpa *vpa_types.VerticalPodAutoscaler) (labels.Selector, []condition) { - selector, fetchErr := feeder.selectorFetcher.Fetch(vpa) +func (feeder *clusterStateFeeder) getSelector(ctx context.Context, vpa *vpa_types.VerticalPodAutoscaler) (labels.Selector, []condition) { + selector, fetchErr := feeder.selectorFetcher.Fetch(ctx, vpa) if selector != nil { - validTargetRef, unsupportedCondition := feeder.validateTargetRef(vpa) + validTargetRef, unsupportedCondition := feeder.validateTargetRef(ctx, vpa) if !validTargetRef { return labels.Nothing(), []condition{ unsupportedCondition, diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go index e2d2a81d17a5..d5d29b5b443f 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go @@ -17,6 +17,7 @@ limitations under the License. 
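
InitFromCheckpoints and GarbageCollectCheckpoints above pass context.TODO() — the conventional marker that a real context still needs to be plumbed in from their callers — while LoadVPAs itself now accepts whatever context the caller owns. Once a caller does control the lifetime, giving each periodic pass its own deadline is the natural next step; a sketch under that assumption, with maintain and runGC as hypothetical names:

    package main

    import (
        "context"
        "time"
    )

    // maintain runs runGC on a fixed interval, deriving a fresh deadline
    // from the parent context for every pass. runGC stands in for a
    // method like GarbageCollectCheckpoints.
    func maintain(parent context.Context, interval, budget time.Duration, runGC func(context.Context)) {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            select {
            case <-parent.Done():
                return // process is shutting down; stop the loop
            case <-ticker.C:
                ctx, cancel := context.WithTimeout(parent, budget)
                runGC(ctx)
                cancel() // release the timer attached to ctx
            }
        }
    }

    func main() {
        maintain(context.Background(), time.Second, 500*time.Millisecond,
            func(ctx context.Context) { /* one bounded GC pass */ })
    }
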
package input import ( + "context" "fmt" "testing" "time" @@ -32,7 +33,7 @@ import ( "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/history" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/spec" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" - "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher" + controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher" target_mock "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/mock" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" ) @@ -42,7 +43,7 @@ type fakeControllerFetcher struct { err error } -func (f *fakeControllerFetcher) FindTopMostWellKnownOrScalable(_ *controllerfetcher.ControllerKeyWithAPIVersion) (*controllerfetcher.ControllerKeyWithAPIVersion, error) { +func (f *fakeControllerFetcher) FindTopMostWellKnownOrScalable(_ context.Context, _ *controllerfetcher.ControllerKeyWithAPIVersion) (*controllerfetcher.ControllerKeyWithAPIVersion, error) { return f.key, f.err } @@ -315,7 +316,7 @@ func TestLoadPods(t *testing.T) { if tc.expectedVpaFetch { targetSelectorFetcher.EXPECT().Fetch(vpa).Return(tc.selector, tc.fetchSelectorError) } - clusterStateFeeder.LoadVPAs() + clusterStateFeeder.LoadVPAs(context.Background()) vpaID := model.VpaID{ Namespace: vpa.Namespace, diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go index b9912b946ed5..ff60d4cf0a5b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go @@ -17,6 +17,7 @@ limitations under the License. package model import ( + "context" "fmt" "time" @@ -360,10 +361,10 @@ func (cluster *ClusterState) findOrCreateAggregateContainerState(containerID Con // // 2) The last sample is too old to give meaningful recommendation (>8 days), // 3) There are no samples and the aggregate state was created >8 days ago. -func (cluster *ClusterState) garbageCollectAggregateCollectionStates(now time.Time, controllerFetcher controllerfetcher.ControllerFetcher) { +func (cluster *ClusterState) garbageCollectAggregateCollectionStates(ctx context.Context, now time.Time, controllerFetcher controllerfetcher.ControllerFetcher) { klog.V(1).Info("Garbage collection of AggregateCollectionStates triggered") keysToDelete := make([]AggregateStateKey, 0) - contributiveKeys := cluster.getContributiveAggregateStateKeys(controllerFetcher) + contributiveKeys := cluster.getContributiveAggregateStateKeys(ctx, controllerFetcher) for key, aggregateContainerState := range cluster.aggregateStateMap { isKeyContributive := contributiveKeys[key] if !isKeyContributive && aggregateContainerState.isEmpty() { @@ -394,21 +395,21 @@ func (cluster *ClusterState) garbageCollectAggregateCollectionStates(now time.Ti // // 2) The last sample is too old to give meaningful recommendation (>8 days), // 3) There are no samples and the aggregate state was created >8 days ago. 
-func (cluster *ClusterState) RateLimitedGarbageCollectAggregateCollectionStates(now time.Time, controllerFetcher controllerfetcher.ControllerFetcher) { +func (cluster *ClusterState) RateLimitedGarbageCollectAggregateCollectionStates(ctx context.Context, now time.Time, controllerFetcher controllerfetcher.ControllerFetcher) { if now.Sub(cluster.lastAggregateContainerStateGC) < cluster.gcInterval { return } - cluster.garbageCollectAggregateCollectionStates(now, controllerFetcher) + cluster.garbageCollectAggregateCollectionStates(ctx, now, controllerFetcher) cluster.lastAggregateContainerStateGC = now } -func (cluster *ClusterState) getContributiveAggregateStateKeys(controllerFetcher controllerfetcher.ControllerFetcher) map[AggregateStateKey]bool { +func (cluster *ClusterState) getContributiveAggregateStateKeys(ctx context.Context, controllerFetcher controllerfetcher.ControllerFetcher) map[AggregateStateKey]bool { contributiveKeys := map[AggregateStateKey]bool{} for _, pod := range cluster.Pods { // Pod is considered contributive in any of following situations: // 1) It is in active state - i.e. not PodSucceeded nor PodFailed. // 2) Its associated controller (e.g. Deployment) still exists. - podControllerExists := cluster.GetControllerForPodUnderVPA(pod, controllerFetcher) != nil + podControllerExists := cluster.GetControllerForPodUnderVPA(ctx, pod, controllerFetcher) != nil podActive := pod.Phase != apiv1.PodSucceeded && pod.Phase != apiv1.PodFailed if podActive || podControllerExists { for container := range pod.Containers { @@ -453,7 +454,7 @@ func (cluster *ClusterState) GetMatchingPods(vpa *Vpa) []PodID { } // GetControllerForPodUnderVPA returns controller associated with given Pod. Returns nil if Pod is not controlled by a VPA object. -func (cluster *ClusterState) GetControllerForPodUnderVPA(pod *PodState, controllerFetcher controllerfetcher.ControllerFetcher) *controllerfetcher.ControllerKeyWithAPIVersion { +func (cluster *ClusterState) GetControllerForPodUnderVPA(ctx context.Context, pod *PodState, controllerFetcher controllerfetcher.ControllerFetcher) *controllerfetcher.ControllerKeyWithAPIVersion { controllingVPA := cluster.GetControllingVPA(pod) if controllingVPA != nil { controller := &controllerfetcher.ControllerKeyWithAPIVersion{ @@ -464,7 +465,7 @@ func (cluster *ClusterState) GetControllerForPodUnderVPA(pod *PodState, controll }, ApiVersion: controllingVPA.TargetRef.APIVersion, } - topLevelController, _ := controllerFetcher.FindTopMostWellKnownOrScalable(controller) + topLevelController, _ := controllerFetcher.FindTopMostWellKnownOrScalable(ctx, controller) return topLevelController } return nil diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go index a37658dbd810..a0780f4e82a9 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go @@ -17,6 +17,7 @@ limitations under the License. 
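
ctx has to ride through getContributiveAggregateStateKeys and GetControllerForPodUnderVPA because the leaf call, FindTopMostWellKnownOrScalable, may issue live API requests. The Go convention followed here is to pass the context as an explicit first parameter at every level rather than stashing it in a struct, which is why each intermediate signature grows an argument it never inspects itself. A compressed, runnable sketch of that chain — Fetcher, fakeFetcher and controllerFor are illustrative names only:

    package main

    import (
        "context"
        "fmt"
    )

    // Fetcher mirrors the shape of ControllerFetcher: context first,
    // never stored in a struct field.
    type Fetcher interface {
        FindOwner(ctx context.Context, name string) (string, error)
    }

    type fakeFetcher struct{}

    func (fakeFetcher) FindOwner(ctx context.Context, name string) (string, error) {
        if err := ctx.Err(); err != nil { // a real client would hand ctx to the API call
            return "", err
        }
        return "deployment/" + name, nil
    }

    // controllerFor is the intermediate layer: it does nothing with ctx
    // except forward it, just like getContributiveAggregateStateKeys.
    func controllerFor(ctx context.Context, f Fetcher, pod string) (string, error) {
        return f.FindOwner(ctx, pod)
    }

    func main() {
        owner, err := controllerFor(context.Background(), fakeFetcher{}, "pod-1")
        fmt.Println(owner, err)
    }
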
package model import ( + "context" "fmt" "testing" "time" @@ -29,11 +30,13 @@ import ( "k8s.io/klog/v2" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" - "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher" + controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" ) var ( + ctx = context.Background() + testPodID = PodID{"namespace-1", "pod-1"} testPodID3 = PodID{"namespace-1", "pod-3"} testPodID4 = PodID{"namespace-1", "pod-4"} @@ -67,7 +70,7 @@ type fakeControllerFetcher struct { err error } -func (f *fakeControllerFetcher) FindTopMostWellKnownOrScalable(controller *controllerfetcher.ControllerKeyWithAPIVersion) (*controllerfetcher.ControllerKeyWithAPIVersion, error) { +func (f *fakeControllerFetcher) FindTopMostWellKnownOrScalable(_ context.Context, _ *controllerfetcher.ControllerKeyWithAPIVersion) (*controllerfetcher.ControllerKeyWithAPIVersion, error) { return f.key, f.err } @@ -112,7 +115,7 @@ func TestClusterGCAggregateContainerStateDeletesOld(t *testing.T) { assert.NotEmpty(t, vpa.aggregateContainerStates) // AggregateContainerState are valid for 8 days since last sample - cluster.garbageCollectAggregateCollectionStates(usageSample.MeasureStart.Add(9*24*time.Hour), testControllerFetcher) + cluster.garbageCollectAggregateCollectionStates(ctx, usageSample.MeasureStart.Add(9*24*time.Hour), testControllerFetcher) // AggregateContainerState should be deleted from both cluster and vpa assert.Empty(t, cluster.aggregateStateMap) @@ -138,12 +141,12 @@ func TestClusterGCAggregateContainerStateDeletesOldEmpty(t *testing.T) { } // Verify empty aggregate states are not removed right away. - cluster.garbageCollectAggregateCollectionStates(creationTime.Add(1*time.Minute), testControllerFetcher) // AggregateContainerState should be deleted from both cluster and vpa + cluster.garbageCollectAggregateCollectionStates(ctx, creationTime.Add(1*time.Minute), testControllerFetcher) // AggregateContainerState should be deleted from both cluster and vpa assert.NotEmpty(t, cluster.aggregateStateMap) assert.NotEmpty(t, vpa.aggregateContainerStates) // AggregateContainerState are valid for 8 days since creation - cluster.garbageCollectAggregateCollectionStates(creationTime.Add(9*24*time.Hour), testControllerFetcher) + cluster.garbageCollectAggregateCollectionStates(ctx, creationTime.Add(9*24*time.Hour), testControllerFetcher) // AggregateContainerState should be deleted from both cluster and vpa assert.Empty(t, cluster.aggregateStateMap) @@ -167,14 +170,14 @@ func TestClusterGCAggregateContainerStateDeletesEmptyInactiveWithoutController(t assert.NotEmpty(t, cluster.aggregateStateMap) assert.NotEmpty(t, vpa.aggregateContainerStates) - cluster.garbageCollectAggregateCollectionStates(testTimestamp, controller) + cluster.garbageCollectAggregateCollectionStates(ctx, testTimestamp, controller) // AggregateContainerState should not be deleted as the pod is still active. assert.NotEmpty(t, cluster.aggregateStateMap) assert.NotEmpty(t, vpa.aggregateContainerStates) cluster.Pods[pod.ID].Phase = apiv1.PodSucceeded - cluster.garbageCollectAggregateCollectionStates(testTimestamp, controller) + cluster.garbageCollectAggregateCollectionStates(ctx, testTimestamp, controller) // AggregateContainerState should be empty as the pod is no longer active, controller is not alive // and there are no usage samples. 
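
The two tests around this hunk pin down the rule the garbage collector relies on: aggregate state stays while its pod is still active or its controller still exists, and becomes collectible only once both are gone. That predicate in isolation, as a runnable sketch (names are illustrative):

    package main

    import "fmt"

    // contributive mirrors the rule in getContributiveAggregateStateKeys:
    // keep state while the pod is active OR its controller is still alive.
    func contributive(podActive, controllerExists bool) bool {
        return podActive || controllerExists
    }

    func main() {
        for _, tc := range []struct{ active, ctrl, want bool }{
            {true, false, true},   // active pod, controller gone: keep
            {false, true, true},   // succeeded pod, controller alive: keep
            {false, false, false}, // succeeded pod, controller gone: collectible
        } {
            if got := contributive(tc.active, tc.ctrl); got != tc.want {
                fmt.Println("unexpected:", tc)
            }
        }
        fmt.Println("ok")
    }
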
@@ -196,14 +199,14 @@ func TestClusterGCAggregateContainerStateLeavesEmptyInactiveWithController(t *te assert.NotEmpty(t, cluster.aggregateStateMap) assert.NotEmpty(t, vpa.aggregateContainerStates) - cluster.garbageCollectAggregateCollectionStates(testTimestamp, controller) + cluster.garbageCollectAggregateCollectionStates(ctx, testTimestamp, controller) // AggregateContainerState should not be deleted as the pod is still active. assert.NotEmpty(t, cluster.aggregateStateMap) assert.NotEmpty(t, vpa.aggregateContainerStates) cluster.Pods[pod.ID].Phase = apiv1.PodSucceeded - cluster.garbageCollectAggregateCollectionStates(testTimestamp, controller) + cluster.garbageCollectAggregateCollectionStates(ctx, testTimestamp, controller) // AggregateContainerState should not be deleted as the controller is still alive. assert.NotEmpty(t, cluster.aggregateStateMap) @@ -226,7 +229,7 @@ func TestClusterGCAggregateContainerStateLeavesValid(t *testing.T) { assert.NotEmpty(t, vpa.aggregateContainerStates) // AggregateContainerState are valid for 8 days since last sample - cluster.garbageCollectAggregateCollectionStates(usageSample.MeasureStart.Add(7*24*time.Hour), testControllerFetcher) + cluster.garbageCollectAggregateCollectionStates(ctx, usageSample.MeasureStart.Add(7*24*time.Hour), testControllerFetcher) assert.NotEmpty(t, cluster.aggregateStateMap) assert.NotEmpty(t, vpa.aggregateContainerStates) @@ -253,7 +256,7 @@ func TestAddSampleAfterAggregateContainerStateGCed(t *testing.T) { // AggregateContainerState are invalid after 8 days since last sample gcTimestamp := usageSample.MeasureStart.Add(10 * 24 * time.Hour) - cluster.garbageCollectAggregateCollectionStates(gcTimestamp, testControllerFetcher) + cluster.garbageCollectAggregateCollectionStates(ctx, gcTimestamp, testControllerFetcher) assert.Empty(t, cluster.aggregateStateMap) assert.Empty(t, vpa.aggregateContainerStates) @@ -278,7 +281,7 @@ func TestClusterGCRateLimiting(t *testing.T) { sampleExpireTime := usageSample.MeasureStart.Add(9 * 24 * time.Hour) // AggregateContainerState are valid for 8 days since last sample but this run // doesn't remove the sample, because we didn't add it yet. - cluster.RateLimitedGarbageCollectAggregateCollectionStates(sampleExpireTime, testControllerFetcher) + cluster.RateLimitedGarbageCollectAggregateCollectionStates(ctx, sampleExpireTime, testControllerFetcher) vpa := addTestVpa(cluster) addTestPod(cluster) assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID, testRequest)) @@ -290,12 +293,12 @@ func TestClusterGCRateLimiting(t *testing.T) { // Sample is expired but this run doesn't remove it yet, because less than testGcPeriod // elapsed since the previous run. 
- cluster.RateLimitedGarbageCollectAggregateCollectionStates(sampleExpireTime.Add(testGcPeriod/2), testControllerFetcher) + cluster.RateLimitedGarbageCollectAggregateCollectionStates(ctx, sampleExpireTime.Add(testGcPeriod/2), testControllerFetcher) assert.NotEmpty(t, cluster.aggregateStateMap) assert.NotEmpty(t, vpa.aggregateContainerStates) // AggregateContainerState should be deleted from both cluster and vpa - cluster.RateLimitedGarbageCollectAggregateCollectionStates(sampleExpireTime.Add(2*testGcPeriod), testControllerFetcher) + cluster.RateLimitedGarbageCollectAggregateCollectionStates(ctx, sampleExpireTime.Add(2*testGcPeriod), testControllerFetcher) assert.Empty(t, cluster.aggregateStateMap) assert.Empty(t, vpa.aggregateContainerStates) } diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go index 5a89480e701e..bf3060e6202b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go @@ -154,7 +154,7 @@ func (r *recommender) RunOnce() { klog.V(3).Infof("Recommender Run") - r.clusterStateFeeder.LoadVPAs() + r.clusterStateFeeder.LoadVPAs(ctx) timer.ObserveStep("LoadVPAs") r.clusterStateFeeder.LoadPods() @@ -170,7 +170,7 @@ func (r *recommender) RunOnce() { r.MaintainCheckpoints(ctx, *minCheckpointsPerRun) timer.ObserveStep("MaintainCheckpoints") - r.clusterState.RateLimitedGarbageCollectAggregateCollectionStates(time.Now(), r.controllerFetcher) + r.clusterState.RateLimitedGarbageCollectAggregateCollectionStates(ctx, time.Now(), r.controllerFetcher) timer.ObserveStep("GarbageCollect") klog.V(3).Infof("ClusterState is tracking %d aggregated container states", r.clusterState.StateMapSize()) } diff --git a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher.go b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher.go index dcbd11ee210d..54f38f3220a8 100644 --- a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher.go +++ b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher.go @@ -75,7 +75,7 @@ type ControllerKeyWithAPIVersion struct { // ControllerFetcher is responsible for finding the topmost well-known or scalable controller type ControllerFetcher interface { // FindTopMostWellKnownOrScalable returns topmost well-known or scalable controller. Error is returned if controller cannot be found. 
- FindTopMostWellKnownOrScalable(controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) + FindTopMostWellKnownOrScalable(ctx context.Context, controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) } type controllerFetcher struct { @@ -199,7 +199,7 @@ func getParentOfWellKnownController(informer cache.SharedIndexInformer, controll return nil, fmt.Errorf("don't know how to read owner controller") } -func (f *controllerFetcher) getParentOfController(controllerKey ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { +func (f *controllerFetcher) getParentOfController(ctx context.Context, controllerKey ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { kind := wellKnownController(controllerKey.Kind) informer, exists := f.informersMap[kind] if exists { @@ -211,7 +211,7 @@ func (f *controllerFetcher) getParentOfController(controllerKey ControllerKeyWit return nil, err } - owner, err := f.getOwnerForScaleResource(groupKind, controllerKey.Namespace, controllerKey.Name) + owner, err := f.getOwnerForScaleResource(ctx, groupKind, controllerKey.Namespace, controllerKey.Name) if apierrors.IsNotFound(err) { return nil, nil } @@ -244,16 +244,16 @@ func (f *controllerFetcher) isWellKnown(key *ControllerKeyWithAPIVersion) bool { return exists } -func (f *controllerFetcher) getScaleForResource(namespace string, groupResource schema.GroupResource, name string) (controller *autoscalingapi.Scale, err error) { +func (f *controllerFetcher) getScaleForResource(ctx context.Context, namespace string, groupResource schema.GroupResource, name string) (controller *autoscalingapi.Scale, err error) { if ok, scale, err := f.scaleSubresourceCacheStorage.Get(namespace, groupResource, name); ok { return scale, err } - scale, err := f.scaleNamespacer.Scales(namespace).Get(context.TODO(), groupResource, name, metav1.GetOptions{}) + scale, err := f.scaleNamespacer.Scales(namespace).Get(ctx, groupResource, name, metav1.GetOptions{}) f.scaleSubresourceCacheStorage.Insert(namespace, groupResource, name, scale, err) return scale, err } -func (f *controllerFetcher) isWellKnownOrScalable(key *ControllerKeyWithAPIVersion) bool { +func (f *controllerFetcher) isWellKnownOrScalable(ctx context.Context, key *ControllerKeyWithAPIVersion) bool { if f.isWellKnown(key) { return true } @@ -276,7 +276,7 @@ func (f *controllerFetcher) isWellKnownOrScalable(key *ControllerKeyWithAPIVersi for _, mapping := range mappings { groupResource := mapping.Resource.GroupResource() - scale, err := f.getScaleForResource(key.Namespace, groupResource, key.Name) + scale, err := f.getScaleForResource(ctx, key.Namespace, groupResource, key.Name) if err == nil && scale != nil { return true } @@ -284,7 +284,7 @@ func (f *controllerFetcher) isWellKnownOrScalable(key *ControllerKeyWithAPIVersi return false } -func (f *controllerFetcher) getOwnerForScaleResource(groupKind schema.GroupKind, namespace, name string) (*ControllerKeyWithAPIVersion, error) { +func (f *controllerFetcher) getOwnerForScaleResource(ctx context.Context, groupKind schema.GroupKind, namespace, name string) (*ControllerKeyWithAPIVersion, error) { if wellKnownController(groupKind.Kind) == node { // Some pods specify nods as their owners. This causes performance problems // in big clusters when VPA tries to get all nodes. 
We know nodes aren't @@ -298,7 +298,7 @@ func (f *controllerFetcher) getOwnerForScaleResource(groupKind schema.GroupKind, var lastError error for _, mapping := range mappings { groupResource := mapping.Resource.GroupResource() - scale, err := f.getScaleForResource(namespace, groupResource, name) + scale, err := f.getScaleForResource(ctx, namespace, groupResource, name) if err == nil { return getOwnerController(scale.OwnerReferences, namespace), nil } @@ -309,14 +309,14 @@ func (f *controllerFetcher) getOwnerForScaleResource(groupKind schema.GroupKind, return nil, lastError } -func (f *controllerFetcher) FindTopMostWellKnownOrScalable(key *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { +func (f *controllerFetcher) FindTopMostWellKnownOrScalable(ctx context.Context, key *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { if key == nil { return nil, nil } var topMostWellKnownOrScalable *ControllerKeyWithAPIVersion - wellKnownOrScalable := f.isWellKnownOrScalable(key) + wellKnownOrScalable := f.isWellKnownOrScalable(ctx, key) if wellKnownOrScalable { topMostWellKnownOrScalable = key } @@ -324,7 +324,7 @@ func (f *controllerFetcher) FindTopMostWellKnownOrScalable(key *ControllerKeyWit visited := make(map[ControllerKeyWithAPIVersion]bool) visited[*key] = true for { - owner, err := f.getParentOfController(*key) + owner, err := f.getParentOfController(ctx, *key) if err != nil { return nil, err } @@ -333,7 +333,7 @@ func (f *controllerFetcher) FindTopMostWellKnownOrScalable(key *ControllerKeyWit return topMostWellKnownOrScalable, nil } - wellKnownOrScalable = f.isWellKnownOrScalable(owner) + wellKnownOrScalable = f.isWellKnownOrScalable(ctx, owner) if wellKnownOrScalable { topMostWellKnownOrScalable = owner } diff --git a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_fake.go b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_fake.go index 2ff38be46e8a..b2a8d5eb0cc0 100644 --- a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_fake.go +++ b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_fake.go @@ -16,11 +16,13 @@ limitations under the License. package controllerfetcher +import "context" + // FakeControllerFetcher should be used in test only. It returns exactly the same controllerKey type FakeControllerFetcher struct{} // FindTopMostWellKnownOrScalable returns the same key for that fake implementation -func (f FakeControllerFetcher) FindTopMostWellKnownOrScalable(controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { +func (f FakeControllerFetcher) FindTopMostWellKnownOrScalable(_ context.Context, controller *ControllerKeyWithAPIVersion) (*ControllerKeyWithAPIVersion, error) { return controller, nil } diff --git a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go index 23eeb73321c9..15a59fbe9de9 100644 --- a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go +++ b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go @@ -17,6 +17,7 @@ limitations under the License. 
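
FindTopMostWellKnownOrScalable climbs the ownerReference chain, keeping a visited set to guard against ownership cycles, and now forwards ctx into every scale lookup it makes along the way. The loop's essential shape, reduced to a standalone sketch — key, topMost and getParent are hypothetical stand-ins for the fetcher's types:

    package main

    import (
        "context"
        "fmt"
    )

    type key struct{ kind, name string }

    // topMost follows parent links until none remain, refusing to revisit
    // a key — the same cycle guard the fetcher's loop relies on.
    func topMost(ctx context.Context, start key, getParent func(context.Context, key) (*key, error)) (key, error) {
        visited := map[key]bool{start: true}
        current := start
        for {
            parent, err := getParent(ctx, current)
            if err != nil {
                return key{}, err
            }
            if parent == nil {
                return current, nil // reached the top of the chain
            }
            if visited[*parent] {
                return key{}, fmt.Errorf("ownership cycle at %v", *parent)
            }
            visited[*parent] = true
            current = *parent
        }
    }

    func main() {
        parents := map[key]key{{"ReplicaSet", "rs-1"}: {"Deployment", "dep-1"}}
        top, err := topMost(context.Background(), key{"ReplicaSet", "rs-1"},
            func(_ context.Context, k key) (*key, error) {
                if p, ok := parents[k]; ok {
                    return &p, nil
                }
                return nil, nil
            })
        fmt.Println(top, err) // {Deployment dep-1} <nil>
    }
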
package controllerfetcher import ( + "context" "fmt" "testing" "time" @@ -48,8 +49,11 @@ const ( testReplicationController = "test-rc" ) -var wellKnownControllers = []wellKnownController{daemonSet, deployment, replicaSet, statefulSet, replicationController, job, cronJob} -var trueVar = true +var ( + ctx = context.Background() + wellKnownControllers = []wellKnownController{daemonSet, deployment, replicaSet, statefulSet, replicationController, job, cronJob} + trueVar = true +) func simpleControllerFetcher() *controllerFetcher { f := controllerFetcher{} @@ -405,7 +409,7 @@ func TestControllerFetcher(t *testing.T) { for _, obj := range tc.objects { addController(t, f, obj) } - topMostWellKnownOrScalableController, err := f.FindTopMostWellKnownOrScalable(tc.key) + topMostWellKnownOrScalableController, err := f.FindTopMostWellKnownOrScalable(ctx, tc.key) if tc.expectedKey == nil { assert.Nil(t, topMostWellKnownOrScalableController) } else { diff --git a/vertical-pod-autoscaler/pkg/target/fetcher.go b/vertical-pod-autoscaler/pkg/target/fetcher.go index f7cb73bd0118..9ad372587e18 100644 --- a/vertical-pod-autoscaler/pkg/target/fetcher.go +++ b/vertical-pod-autoscaler/pkg/target/fetcher.go @@ -50,7 +50,7 @@ const ( type VpaTargetSelectorFetcher interface { // Fetch returns a labelSelector used to gather Pods controlled by the given VPA. // If error is nil, the returned labelSelector is not nil. - Fetch(vpa *vpa_types.VerticalPodAutoscaler) (labels.Selector, error) + Fetch(ctx context.Context, vpa *vpa_types.VerticalPodAutoscaler) (labels.Selector, error) } type wellKnownController string @@ -116,7 +116,7 @@ type vpaTargetSelectorFetcher struct { informersMap map[wellKnownController]cache.SharedIndexInformer } -func (f *vpaTargetSelectorFetcher) Fetch(vpa *vpa_types.VerticalPodAutoscaler) (labels.Selector, error) { +func (f *vpaTargetSelectorFetcher) Fetch(ctx context.Context, vpa *vpa_types.VerticalPodAutoscaler) (labels.Selector, error) { if vpa.Spec.TargetRef == nil { return nil, fmt.Errorf("targetRef not defined. 
If this is a v1beta1 object switch to v1beta2.") } @@ -137,7 +137,7 @@ func (f *vpaTargetSelectorFetcher) Fetch(vpa *vpa_types.VerticalPodAutoscaler) ( Kind: vpa.Spec.TargetRef.Kind, } - selector, err := f.getLabelSelectorFromResource(groupKind, vpa.Namespace, vpa.Spec.TargetRef.Name) + selector, err := f.getLabelSelectorFromResource(ctx, groupKind, vpa.Namespace, vpa.Spec.TargetRef.Name) if err != nil { return nil, fmt.Errorf("Unhandled targetRef %s / %s / %s, last error %v", vpa.Spec.TargetRef.APIVersion, vpa.Spec.TargetRef.Kind, vpa.Spec.TargetRef.Name, err) @@ -173,7 +173,7 @@ func getLabelSelector(informer cache.SharedIndexInformer, kind, namespace, name } func (f *vpaTargetSelectorFetcher) getLabelSelectorFromResource( - groupKind schema.GroupKind, namespace, name string, + ctx context.Context, groupKind schema.GroupKind, namespace, name string, ) (labels.Selector, error) { mappings, err := f.mapper.RESTMappings(groupKind) if err != nil { @@ -183,7 +183,7 @@ func (f *vpaTargetSelectorFetcher) getLabelSelectorFromResource( var lastError error for _, mapping := range mappings { groupResource := mapping.Resource.GroupResource() - scale, err := f.scaleNamespacer.Scales(namespace).Get(context.TODO(), groupResource, name, metav1.GetOptions{}) + scale, err := f.scaleNamespacer.Scales(namespace).Get(ctx, groupResource, name, metav1.GetOptions{}) if err == nil { if scale.Status.Selector == "" { return nil, fmt.Errorf("Resource %s/%s has an empty selector for scale sub-resource", namespace, name) diff --git a/vertical-pod-autoscaler/pkg/target/mock/fetcher_mock.go b/vertical-pod-autoscaler/pkg/target/mock/fetcher_mock.go index c3fb3456b156..ce8796bbaa97 100644 --- a/vertical-pod-autoscaler/pkg/target/mock/fetcher_mock.go +++ b/vertical-pod-autoscaler/pkg/target/mock/fetcher_mock.go @@ -17,6 +17,8 @@ limitations under the License. 
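
Swapping context.TODO() for the caller's ctx in the Scales(namespace).Get call is the payoff of the wiring: a cancelled webhook request or a recommender shutdown now aborts the in-flight scale-subresource GET instead of letting it run to completion. Assuming a client-go scale.ScalesGetter, the resulting call shape looks like this (sketch; selectorFor and the apps/deployments group-resource are illustrative):

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/client-go/scale"
    )

    // selectorFor fetches the scale subresource with the caller's context;
    // cancelling ctx aborts the in-flight API request, which a
    // context.TODO() call site could never do.
    func selectorFor(ctx context.Context, scales scale.ScalesGetter, namespace, name string) (string, error) {
        gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
        s, err := scales.Scales(namespace).Get(ctx, gr, name, metav1.GetOptions{})
        if err != nil {
            return "", err
        }
        return s.Status.Selector, nil
    }
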
package mocktarget import ( + "context" + "github.com/golang/mock/gomock" "k8s.io/apimachinery/pkg/labels" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" @@ -46,7 +48,7 @@ func (_m *MockVpaTargetSelectorFetcher) EXPECT() *_MockVpaTargetSelectorFetcherR } // Fetch enables configuring expectations on Fetch method -func (_m *MockVpaTargetSelectorFetcher) Fetch(vpa *vpa_types.VerticalPodAutoscaler) (labels.Selector, error) { +func (_m *MockVpaTargetSelectorFetcher) Fetch(_ context.Context, vpa *vpa_types.VerticalPodAutoscaler) (labels.Selector, error) { ret := _m.ctrl.Call(_m, "Fetch", vpa) ret0, _ := ret[0].(labels.Selector) ret1, _ := ret[1].(error) diff --git a/vertical-pod-autoscaler/pkg/updater/logic/updater.go b/vertical-pod-autoscaler/pkg/updater/logic/updater.go index 7e9c7f8a1e2f..81db0d6ebe2f 100644 --- a/vertical-pod-autoscaler/pkg/updater/logic/updater.go +++ b/vertical-pod-autoscaler/pkg/updater/logic/updater.go @@ -142,7 +142,7 @@ func (u *updater) RunOnce(ctx context.Context) { klog.V(3).Infof("skipping VPA object %s because its mode is not \"Recreate\" or \"Auto\"", klog.KObj(vpa)) continue } - selector, err := u.selectorFetcher.Fetch(vpa) + selector, err := u.selectorFetcher.Fetch(ctx, vpa) if err != nil { klog.V(3).Infof("skipping VPA object %s because we cannot fetch selector", klog.KObj(vpa)) continue @@ -172,7 +172,7 @@ func (u *updater) RunOnce(ctx context.Context) { controlledPods := make(map[*vpa_types.VerticalPodAutoscaler][]*apiv1.Pod) for _, pod := range allLivePods { - controllingVPA := vpa_api_util.GetControllingVPAForPod(pod, vpas, u.controllerFetcher) + controllingVPA := vpa_api_util.GetControllingVPAForPod(ctx, pod, vpas, u.controllerFetcher) if controllingVPA != nil { controlledPods[controllingVPA.Vpa] = append(controlledPods[controllingVPA.Vpa], pod) } diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/api.go b/vertical-pod-autoscaler/pkg/utils/vpa/api.go index 42fa0bb54d27..819502bc5406 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/api.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/api.go @@ -127,7 +127,7 @@ func stronger(a, b *vpa_types.VerticalPodAutoscaler) bool { } // GetControllingVPAForPod chooses the earliest created VPA from the input list that matches the given Pod. -func GetControllingVPAForPod(pod *core.Pod, vpas []*VpaWithSelector, ctrlFetcher controllerfetcher.ControllerFetcher) *VpaWithSelector { +func GetControllingVPAForPod(ctx context.Context, pod *core.Pod, vpas []*VpaWithSelector, ctrlFetcher controllerfetcher.ControllerFetcher) *VpaWithSelector { var ownerRefrence *meta.OwnerReference for i := range pod.OwnerReferences { @@ -148,7 +148,7 @@ func GetControllingVPAForPod(pod *core.Pod, vpas []*VpaWithSelector, ctrlFetcher }, ApiVersion: ownerRefrence.APIVersion, } - parentController, err := ctrlFetcher.FindTopMostWellKnownOrScalable(k) + parentController, err := ctrlFetcher.FindTopMostWellKnownOrScalable(ctx, k) if err != nil { klog.Errorf("fail to get pod controller: pod=%s err=%s", klog.KObj(pod), err.Error()) return nil diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go index ecee638d8766..c0aade6683c6 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go @@ -17,6 +17,7 @@ limitations under the License. 
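
One detail worth noting in the mock above: the hand-written gomock wrapper deliberately swallows the context — _m.ctrl.Call(_m, "Fetch", vpa) records only the VPA argument — so existing expectations such as targetSelectorFetcher.EXPECT().Fetch(vpa) in cluster_feeder_test.go keep matching without modification. A context-forwarding variant (hypothetical, not what this patch does) would force a matcher onto every call site:

    // Hypothetical context-forwarding variant of the mock:
    func (_m *MockVpaTargetSelectorFetcher) Fetch(ctx context.Context, vpa *vpa_types.VerticalPodAutoscaler) (labels.Selector, error) {
        ret := _m.ctrl.Call(_m, "Fetch", ctx, vpa)
        ret0, _ := ret[0].(labels.Selector)
        ret1, _ := ret[1].(error)
        return ret0, ret1
    }

    // Every expectation would then have to match the context too:
    //   targetSelectorFetcher.EXPECT().Fetch(gomock.Any(), vpa).Return(selector, nil)
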
package api import ( + "context" "flag" "testing" "time" @@ -39,6 +40,8 @@ const ( ) var ( + ctx = context.Background() + anytime = time.Unix(0, 0) ) @@ -142,7 +145,7 @@ func TestPodMatchesVPA(t *testing.T) { type NilControllerFetcher struct{} // FindTopMostWellKnownOrScalable returns the same key for that fake implementation -func (f NilControllerFetcher) FindTopMostWellKnownOrScalable(_ *controllerfetcher.ControllerKeyWithAPIVersion) (*controllerfetcher.ControllerKeyWithAPIVersion, error) { +func (f NilControllerFetcher) FindTopMostWellKnownOrScalable(_ context.Context, _ *controllerfetcher.ControllerKeyWithAPIVersion) (*controllerfetcher.ControllerKeyWithAPIVersion, error) { return nil, nil } @@ -174,7 +177,7 @@ func TestGetControllingVPAForPod(t *testing.T) { Name: "test-sts", APIVersion: "apps/v1", } - chosen := GetControllingVPAForPod(pod, []*VpaWithSelector{ + chosen := GetControllingVPAForPod(ctx, pod, []*VpaWithSelector{ {vpaB, parseLabelSelector("app = testingApp")}, {vpaA, parseLabelSelector("app = testingApp")}, {nonMatchingVPA, parseLabelSelector("app = other")}, @@ -184,7 +187,7 @@ func TestGetControllingVPAForPod(t *testing.T) { // For some Pods (which are *not* under VPA), controllerFetcher.FindTopMostWellKnownOrScalable will return `nil`, e.g. when the Pod owner is a custom resource, which doesn't implement the /scale subresource // See pkg/target/controller_fetcher/controller_fetcher_test.go:393 for testing this behavior // This test case makes sure that GetControllingVPAForPod will just return `nil` in that case as well - chosen = GetControllingVPAForPod(pod, []*VpaWithSelector{{vpaA, parseLabelSelector("app = testingApp")}}, &NilControllerFetcher{}) + chosen = GetControllingVPAForPod(ctx, pod, []*VpaWithSelector{{vpaA, parseLabelSelector("app = testingApp")}}, &NilControllerFetcher{}) assert.Nil(t, chosen) } From 598d1035312ce50f9074cae7dda084227d283953 Mon Sep 17 00:00:00 2001 From: Ismail Alidzhikov Date: Tue, 2 Jul 2024 18:01:36 +0300 Subject: [PATCH 2/2] Address review comments from kwiesmueller --- .../pkg/recommender/model/cluster_test.go | 16 ++++++++++++++-- .../controller_fetcher_test.go | 3 +-- .../pkg/utils/vpa/api_test.go | 4 ++-- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go index a0780f4e82a9..00a416ae258e 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go @@ -35,8 +35,6 @@ import ( ) var ( - ctx = context.Background() - testPodID = PodID{"namespace-1", "pod-1"} testPodID3 = PodID{"namespace-1", "pod-3"} testPodID4 = PodID{"namespace-1", "pod-4"} @@ -100,6 +98,8 @@ func TestClusterAddSample(t *testing.T) { } func TestClusterGCAggregateContainerStateDeletesOld(t *testing.T) { + ctx := context.Background() + // Create a pod with a single container. cluster := NewClusterState(testGcPeriod) vpa := addTestVpa(cluster) @@ -123,6 +123,8 @@ func TestClusterGCAggregateContainerStateDeletesOld(t *testing.T) { } func TestClusterGCAggregateContainerStateDeletesOldEmpty(t *testing.T) { + ctx := context.Background() + // Create a pod with a single container. 
cluster := NewClusterState(testGcPeriod) vpa := addTestVpa(cluster) @@ -154,6 +156,8 @@ func TestClusterGCAggregateContainerStateDeletesOldEmpty(t *testing.T) { } func TestClusterGCAggregateContainerStateDeletesEmptyInactiveWithoutController(t *testing.T) { + ctx := context.Background() + // Create a pod with a single container. cluster := NewClusterState(testGcPeriod) vpa := addTestVpa(cluster) @@ -186,6 +190,8 @@ func TestClusterGCAggregateContainerStateDeletesEmptyInactiveWithoutController(t } func TestClusterGCAggregateContainerStateLeavesEmptyInactiveWithController(t *testing.T) { + ctx := context.Background() + // Create a pod with a single container. cluster := NewClusterState(testGcPeriod) vpa := addTestVpa(cluster) @@ -214,6 +220,8 @@ func TestClusterGCAggregateContainerStateLeavesEmptyInactiveWithController(t *te } func TestClusterGCAggregateContainerStateLeavesValid(t *testing.T) { + ctx := context.Background() + // Create a pod with a single container. cluster := NewClusterState(testGcPeriod) vpa := addTestVpa(cluster) @@ -236,6 +244,8 @@ func TestClusterGCAggregateContainerStateLeavesValid(t *testing.T) { } func TestAddSampleAfterAggregateContainerStateGCed(t *testing.T) { + ctx := context.Background() + // Create a pod with a single container. cluster := NewClusterState(testGcPeriod) vpa := addTestVpa(cluster) @@ -275,6 +285,8 @@ func TestAddSampleAfterAggregateContainerStateGCed(t *testing.T) { } func TestClusterGCRateLimiting(t *testing.T) { + ctx := context.Background() + // Create a pod with a single container. cluster := NewClusterState(testGcPeriod) usageSample := makeTestUsageSample() diff --git a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go index 15a59fbe9de9..93bfb1421420 100644 --- a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go +++ b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go @@ -50,7 +50,6 @@ const ( ) var ( - ctx = context.Background() wellKnownControllers = []wellKnownController{daemonSet, deployment, replicaSet, statefulSet, replicationController, job, cronJob} trueVar = true ) @@ -409,7 +408,7 @@ func TestControllerFetcher(t *testing.T) { for _, obj := range tc.objects { addController(t, f, obj) } - topMostWellKnownOrScalableController, err := f.FindTopMostWellKnownOrScalable(ctx, tc.key) + topMostWellKnownOrScalableController, err := f.FindTopMostWellKnownOrScalable(context.Background(), tc.key) if tc.expectedKey == nil { assert.Nil(t, topMostWellKnownOrScalableController) } else { diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go index c0aade6683c6..2b6f4679103d 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go @@ -40,8 +40,6 @@ const ( ) var ( - ctx = context.Background() - anytime = time.Unix(0, 0) ) @@ -152,6 +150,8 @@ func (f NilControllerFetcher) FindTopMostWellKnownOrScalable(_ context.Context, var _ controllerfetcher.ControllerFetcher = &NilControllerFetcher{} func TestGetControllingVPAForPod(t *testing.T) { + ctx := context.Background() + isController := true pod := test.Pod().WithName("test-pod").AddContainer(test.Container().WithName(containerName).WithCPURequest(resource.MustParse("1")).WithMemRequest(resource.MustParse("100M")).Get()).Get() pod.Labels = map[string]string{"app": "testingApp"}
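
Patch 2 replaces the shared package-level ctx variables with a context.Background() created inside each test, so a test that later swaps in a cancellable or deadline-bound context cannot leak that behaviour into its neighbours. A minimal sketch of the per-test pattern against the fake fetcher (the test name is illustrative):

    package controllerfetcher_test

    import (
        "context"
        "testing"

        controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher"
    )

    func TestFakeFetcherEchoesKey(t *testing.T) {
        ctx := context.Background() // scoped to this test only

        key := &controllerfetcher.ControllerKeyWithAPIVersion{ApiVersion: "apps/v1"}
        got, err := controllerfetcher.FakeControllerFetcher{}.FindTopMostWellKnownOrScalable(ctx, key)
        if err != nil || got != key {
            t.Fatalf("want %v back, got %v (err %v)", key, got, err)
        }
    }
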