|
| 1 | +// Unless explicitly stated otherwise all files in this repository are licensed |
| 2 | +// under the Apache License Version 2.0. |
| 3 | +// This product includes software developed at Datadog (https://www.datadoghq.com/). |
| 4 | +// Copyright 2016-present Datadog, Inc. |
| 5 | + |
| 6 | +package npm |
| 7 | + |
| 8 | +import ( |
| 9 | + "context" |
| 10 | + "encoding/json" |
| 11 | + "fmt" |
| 12 | + "strings" |
| 13 | + "testing" |
| 14 | + "time" |
| 15 | + |
| 16 | + "github.com/DataDog/agent-payload/v5/process" |
| 17 | + "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes" |
| 18 | + corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1" |
| 19 | + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" |
| 20 | + "github.com/stretchr/testify/assert" |
| 21 | + "github.com/stretchr/testify/require" |
| 22 | + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 23 | + |
| 24 | + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" |
| 25 | + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" |
| 26 | + awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/kubernetes" |
| 27 | + "github.com/DataDog/test-infra-definitions/common/config" |
| 28 | + npmtools "github.com/DataDog/test-infra-definitions/components/datadog/apps/npm-tools" |
| 29 | + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" |
| 30 | + kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes" |
| 31 | + "github.com/DataDog/test-infra-definitions/components/kubernetes/cilium" |
| 32 | + "github.com/DataDog/test-infra-definitions/components/kubernetes/istio" |
| 33 | +) |
| 34 | + |
// ciliumLBConntrackerTestSuite is the e2e suite checking that NPM's cilium
// load-balancer conntracker reports NAT translations for connections to a
// cilium-load-balanced service on a kind Kubernetes cluster.
type ciliumLBConntrackerTestSuite struct {
	e2e.BaseSuite[environments.Kubernetes]

	// httpBinService is the httpbin kubernetes Service created during
	// provisioning (see testCiliumLBConntracker); tests compare reported
	// connection addresses against it via httpBinCiliumService.
	httpBinService *corev1.Service
}
| 40 | + |
| 41 | +func TestCiliumLBConntracker(t *testing.T) { |
| 42 | + // TODO: find a way to update this list dynamically |
| 43 | + versionsToTest := []string{"1.15.14", "1.16.7", "1.17.1"} |
| 44 | + for _, v := range versionsToTest { |
| 45 | + t.Run(fmt.Sprintf("version %s", v), func(_t *testing.T) { |
| 46 | + _t.Parallel() |
| 47 | + |
| 48 | + testCiliumLBConntracker(t, v) |
| 49 | + }) |
| 50 | + } |
| 51 | +} |
| 52 | + |
| 53 | +func testCiliumLBConntracker(t *testing.T, ciliumVersion string) { |
| 54 | + t.Helper() |
| 55 | + |
| 56 | + suite := &ciliumLBConntrackerTestSuite{} |
| 57 | + |
| 58 | + httpBinServiceInstall := func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error) { |
| 59 | + var err error |
| 60 | + suite.httpBinService, err = istio.NewHttpbinServiceInstallation(e, pulumi.Provider(kubeProvider)) |
| 61 | + return &kubeComp.Workload{}, err |
| 62 | + } |
| 63 | + |
| 64 | + npmToolsWorkload := func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error) { |
| 65 | + // NPM tools Workload |
| 66 | + return npmtools.K8sAppDefinition(e, kubeProvider, "npmtools", "http://httpbin.default.svc.cluster.local:8000") |
| 67 | + } |
| 68 | + |
| 69 | + ciliumHelmValues := map[string]pulumi.Input{ |
| 70 | + "kubeProxyReplacement": pulumi.BoolPtr(true), |
| 71 | + "ipam": pulumi.Map{ |
| 72 | + "method": pulumi.StringPtr("kubernetes"), |
| 73 | + }, |
| 74 | + "socketLB": pulumi.Map{ |
| 75 | + "hostNamespaceOnly": pulumi.BoolPtr(true), |
| 76 | + }, |
| 77 | + "image": pulumi.Map{ |
| 78 | + "tag": pulumi.StringPtr(ciliumVersion), |
| 79 | + }, |
| 80 | + } |
| 81 | + |
| 82 | + name := strings.ReplaceAll(fmt.Sprintf("cilium-lb-%s", ciliumVersion), ".", "-") |
| 83 | + e2e.Run(t, suite, |
| 84 | + e2e.WithStackName(fmt.Sprintf("stack-%s", name)), |
| 85 | + e2e.WithProvisioner( |
| 86 | + awskubernetes.KindProvisioner( |
| 87 | + awskubernetes.WithName(name), |
| 88 | + awskubernetes.WithCiliumOptions(cilium.WithHelmValues(ciliumHelmValues), cilium.WithVersion(ciliumVersion)), |
| 89 | + awskubernetes.WithAgentOptions(kubernetesagentparams.WithHelmValues(systemProbeConfigWithCiliumLB)), |
| 90 | + awskubernetes.WithWorkloadApp(httpBinServiceInstall), |
| 91 | + awskubernetes.WithWorkloadApp(npmToolsWorkload), |
| 92 | + ), |
| 93 | + ), |
| 94 | + ) |
| 95 | +} |
| 96 | + |
| 97 | +// BeforeTest will be called before each test |
| 98 | +func (suite *ciliumLBConntrackerTestSuite) BeforeTest(suiteName, testName string) { |
| 99 | + suite.BaseSuite.BeforeTest(suiteName, testName) |
| 100 | + // default is to reset the current state of the fakeintake aggregators |
| 101 | + if !suite.BaseSuite.IsDevMode() { |
| 102 | + suite.Env().FakeIntake.Client().FlushServerAndResetAggregators() |
| 103 | + } |
| 104 | +} |
| 105 | + |
// AfterTest will be called after each test. It first invokes
// test1HostFakeIntakeNPMDumpInfo (defined elsewhere in this package —
// presumably dumps NPM payload info from the fakeintake for debugging) before
// running the base suite teardown.
func (suite *ciliumLBConntrackerTestSuite) AfterTest(suiteName, testName string) {
	test1HostFakeIntakeNPMDumpInfo(suite.T(), suite.Env().FakeIntake)

	suite.BaseSuite.AfterTest(suiteName, testName)
}
| 112 | + |
| 113 | +func (suite *ciliumLBConntrackerTestSuite) TestCiliumConntracker() { |
| 114 | + fakeIntake := suite.Env().FakeIntake |
| 115 | + |
| 116 | + var hostname string |
| 117 | + suite.Require().EventuallyWithT(func(collect *assert.CollectT) { |
| 118 | + names, err := fakeIntake.Client().GetConnectionsNames() |
| 119 | + if assert.NoError(collect, err, "error getting connection names") && |
| 120 | + assert.NotEmpty(collect, names) { |
| 121 | + hostname = names[0] |
| 122 | + } |
| 123 | + }, time.Minute, time.Second, "timed out getting connection names") |
| 124 | + |
| 125 | + var svcConns []*process.Connection |
| 126 | + suite.Require().EventuallyWithT(func(collect *assert.CollectT) { |
| 127 | + cnx, err := fakeIntake.Client().GetConnections() |
| 128 | + require.NoError(collect, err, "error getting connections") |
| 129 | + payloads := cnx.GetPayloadsByName(hostname) |
| 130 | + // only look at the last two payloads |
| 131 | + require.Greater(collect, len(payloads), 1, "at least 2 payloads not present") |
| 132 | + |
| 133 | + svcConns = nil |
| 134 | + for _, c := range append(payloads[len(payloads)-2].Connections, payloads[len(payloads)-1].Connections...) { |
| 135 | + if c.Raddr.Port != 8000 { |
| 136 | + return |
| 137 | + } |
| 138 | + |
| 139 | + if !assert.NotNil(collect, c.IpTranslation, "ip translation is nil for service connection") { |
| 140 | + return |
| 141 | + } |
| 142 | + |
| 143 | + svcConns = append(svcConns, c) |
| 144 | + } |
| 145 | + |
| 146 | + assert.NotEmpty(collect, svcConns, "no connections for service found") |
| 147 | + }, time.Minute, time.Second, "could not find connections for service") |
| 148 | + |
| 149 | + backends, frontendIP := suite.httpBinCiliumService() |
| 150 | + for _, c := range svcConns { |
| 151 | + suite.Assert().Equalf(frontendIP, c.Raddr.Ip, "front end address not equal to connection raddr") |
| 152 | + suite.Assert().Conditionf(func() bool { |
| 153 | + for _, be := range backends { |
| 154 | + if be.ip == c.IpTranslation.ReplSrcIP && be.port == uint16(c.IpTranslation.ReplSrcPort) { |
| 155 | + return true |
| 156 | + } |
| 157 | + } |
| 158 | + |
| 159 | + return false |
| 160 | + }, "") |
| 161 | + } |
| 162 | +} |
| 163 | + |
// ciliumBackend identifies a single backend endpoint (ip, port) of a
// cilium-load-balanced service, as reported by `cilium-dbg service list`.
type ciliumBackend struct {
	ip   string
	port uint16
}
| 168 | + |
| 169 | +func (suite *ciliumLBConntrackerTestSuite) httpBinCiliumService() (backends []ciliumBackend, frontendIP string) { |
| 170 | + t := suite.T() |
| 171 | + t.Helper() |
| 172 | + |
| 173 | + var stdout string |
| 174 | + require.EventuallyWithT(t, func(collect *assert.CollectT) { |
| 175 | + ciliumPods, err := suite.Env().KubernetesCluster.Client().CoreV1().Pods("kube-system").List(context.Background(), v1.ListOptions{ |
| 176 | + LabelSelector: "k8s-app=cilium", |
| 177 | + }) |
| 178 | + require.NoError(collect, err, "could no get cilium pods") |
| 179 | + require.NotNil(collect, ciliumPods, "cilium pods object is nil") |
| 180 | + require.NotEmpty(collect, ciliumPods.Items, "no cilium pods found") |
| 181 | + |
| 182 | + pod := ciliumPods.Items[0] |
| 183 | + var stderr string |
| 184 | + stdout, stderr, err = suite.Env().KubernetesCluster.KubernetesClient.PodExec("kube-system", pod.Name, "cilium-agent", []string{"cilium-dbg", "service", "list", "-o", "json"}) |
| 185 | + require.NoError(collect, err, "error getting cilium service list") |
| 186 | + require.Empty(collect, stderr, "got output on stderr from cilium service list command", stderr) |
| 187 | + require.NotEmpty(collect, stdout, "empty output from cilium-dbg service list command") |
| 188 | + }, 20*time.Second, time.Second, "could not get cilium-agent pod") |
| 189 | + |
| 190 | + var services []interface{} |
| 191 | + err := json.Unmarshal([]byte(stdout), &services) |
| 192 | + suite.Require().NoError(err, "error deserializing output of cilium-dbg service list command") |
| 193 | + for _, svc := range services { |
| 194 | + spec := svc.(map[string]interface{})["spec"].(map[string]interface{}) |
| 195 | + frontendAddr := spec["frontend-address"].(map[string]interface{}) |
| 196 | + if frontendAddrPort := frontendAddr["port"].(float64); frontendAddrPort != 8000 { |
| 197 | + continue |
| 198 | + } |
| 199 | + if frontendAddrProto, ok := frontendAddr["protocol"]; ok && frontendAddrProto.(string) != "TCP" { |
| 200 | + continue |
| 201 | + } |
| 202 | + |
| 203 | + frontendIP = frontendAddr["ip"].(string) |
| 204 | + _backendAddrs := spec["backend-addresses"].([]interface{}) |
| 205 | + for _, be := range _backendAddrs { |
| 206 | + be := be.(map[string]interface{}) |
| 207 | + backends = append(backends, ciliumBackend{ |
| 208 | + ip: be["ip"].(string), |
| 209 | + port: uint16(be["port"].(float64)), |
| 210 | + }) |
| 211 | + } |
| 212 | + |
| 213 | + break |
| 214 | + } |
| 215 | + |
| 216 | + return backends, frontendIP |
| 217 | + |
| 218 | +} |
0 commit comments