From 0d67f403d5c64b95b2fb6f55ae421a4864740e00 Mon Sep 17 00:00:00 2001
From: hiteshwani <110378441+hiteshwani@users.noreply.github.com>
Date: Wed, 13 Sep 2023 17:35:07 +0530
Subject: [PATCH] Refactored slaves pod configuration and added validation for
 pod name (#8)

Signed-off-by: hiteshwani
---
 contrib/executor/jmeterd/pkg/runner/runner.go |   6 +-
 contrib/executor/jmeterd/pkg/slaves/client.go | 114 +++++++++++++++---
 contrib/executor/jmeterd/pkg/slaves/utils.go  | 114 ++++-------------
 3 files changed, 122 insertions(+), 112 deletions(-)

diff --git a/contrib/executor/jmeterd/pkg/runner/runner.go b/contrib/executor/jmeterd/pkg/runner/runner.go
index b5df6c4a566..c8cad0ffb6c 100644
--- a/contrib/executor/jmeterd/pkg/runner/runner.go
+++ b/contrib/executor/jmeterd/pkg/runner/runner.go
@@ -161,17 +161,17 @@ func (r *JMeterDRunner) Run(ctx context.Context, execution testkube.Execution) (
 
 	slavesClient, err := slaves.NewClient(execution, r.Params, slavesEnvVariables)
 	if err != nil {
-		return *result.WithErrors(errors.Errorf("Getting error while creating slaves client: %v", err)), nil
+		return *result.WithErrors(errors.Wrap(err, "error creating slaves client")), nil
 	}
 
 	//creating slaves provided in SLAVES_COUNT env variable
 	slavesNameIpMap, err := slavesClient.CreateSlaves(ctx)
 	if err != nil {
-		return *result.WithErrors(errors.Errorf("Getting error while creating slaves nodes: %v", err)), nil
+		return *result.WithErrors(errors.Wrap(err, "error creating slaves")), nil
 	}
 	defer slavesClient.DeleteSlaves(ctx, slavesNameIpMap)
 
-	args = append(args, fmt.Sprintf("-R %v", slavesClient.GetSlavesIpString(slavesNameIpMap)))
+	args = append(args, fmt.Sprintf("-R %v", slaves.GetSlavesIpString(slavesNameIpMap)))
 
 	for i := range args {
 		if args[i] == "" {
diff --git a/contrib/executor/jmeterd/pkg/slaves/client.go b/contrib/executor/jmeterd/pkg/slaves/client.go
index 60205b71506..92ab4d3342d 100644
--- a/contrib/executor/jmeterd/pkg/slaves/client.go
+++ b/contrib/executor/jmeterd/pkg/slaves/client.go
@@ -2,19 +2,22 @@ package slaves
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
-	"strings"
 	"time"
 
+	"github.com/pkg/errors"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+
 	"github.com/kubeshop/testkube/contrib/executor/jmeterd/pkg/jmeterenv"
 	"github.com/kubeshop/testkube/pkg/api/v1/testkube"
 	"github.com/kubeshop/testkube/pkg/envs"
 	"github.com/kubeshop/testkube/pkg/executor/output"
 	"github.com/kubeshop/testkube/pkg/k8sclient"
-	"github.com/pkg/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes"
 )
 
 const (
@@ -50,7 +53,7 @@ func NewClient(execution testkube.Execution, envParams envs.Params, slavesEnvVar
 	}, nil
 }
 
-// creating slaves as per count provided in the SLAVES_CLOUNT env variable.
+// CreateSlaves creates slaves as per the count provided in the SLAVES_COUNT env variable.
 // Default SLAVES_COUNT would be 1 if not provided in the env variables
 func (client *Client) CreateSlaves(ctx context.Context) (map[string]string, error) {
 	slavesCount, err := getSlavesCount(client.envVariables[jmeterenv.SlavesCount])
@@ -84,15 +87,14 @@ func (client *Client) CreateSlaves(ctx context.Context) (map[string]string, erro
 	return podIPAddresses, nil
 }
 
-// created slaves pod and send its ipaddress on the podIPAddressChan channel when pod is in the ready state
+// createSlavePod creates a slave pod and sends its IP address on the podIPAddressChan
+// channel when the pod is in the ready state.
 func (client *Client) createSlavePod(ctx context.Context, currentSlavesCount int, podIPAddressChan chan<- map[string]string, errorChan chan<- error) {
-
-	slavePod, err := getSlavePodConfiguration(client.execution.Name, client.execution, client.envVariables, client.envParams)
+	slavePod, err := client.getSlavePodConfiguration(currentSlavesCount)
 	if err != nil {
 		errorChan <- err
 		return
 	}
-	slavePod.Name = fmt.Sprintf("%s-%v-%v", slavePod.Name, currentSlavesCount, client.execution.Id)
 
 	p, err := client.clientSet.CoreV1().Pods(client.namespace).Create(ctx, slavePod, metav1.CreateOptions{})
 	if err != nil {
@@ -120,6 +122,90 @@ func (client *Client) createSlavePod(ctx context.Context, currentSlavesCount int
 	podIPAddressChan <- podNameIpMap
 }
 
+func (client *Client) getSlavePodConfiguration(currentSlavesCount int) (*v1.Pod, error) {
+	runnerExecutionStr, err := json.Marshal(client.execution)
+	if err != nil {
+		return nil, err
+	}
+
+	podName := ValidateAndGetSlavePodName(client.execution.TestName, client.execution.Id, currentSlavesCount)
+	return &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: podName,
+		},
+		Spec: v1.PodSpec{
+			RestartPolicy: v1.RestartPolicyAlways,
+			InitContainers: []v1.Container{
+				{
+					Name:            "init",
+					Image:           "kubeshop/testkube-init-executor:1.14.3",
+					Command:         []string{"/bin/runner", string(runnerExecutionStr)},
+					Env:             getSlaveRunnerEnv(client.envParams, client.execution),
+					ImagePullPolicy: v1.PullIfNotPresent,
+					VolumeMounts: []v1.VolumeMount{
+						{
+							MountPath: "/data",
+							Name:      "data-volume",
+						},
+					},
+				},
+			},
+			Containers: []v1.Container{
+				{
+					Name:            "main",
+					Image:           "kubeshop/testkube-jmeterd-slaves:999.0.0",
+					Env:             getSlaveConfigurationEnv(client.envVariables),
+					ImagePullPolicy: v1.PullIfNotPresent,
+					Ports: []v1.ContainerPort{
+						{
+							ContainerPort: serverPort,
+							Name:          "server-port",
+						}, {
+							ContainerPort: localPort,
+							Name:          "local-port",
+						},
+					},
+					VolumeMounts: []v1.VolumeMount{
+						{
+							MountPath: "/data",
+							Name:      "data-volume",
+						},
+					},
+					LivenessProbe: &v1.Probe{
+						ProbeHandler: v1.ProbeHandler{
+							TCPSocket: &v1.TCPSocketAction{
+								Port: intstr.FromInt(serverPort),
+							},
+						},
+						FailureThreshold: 3,
+						PeriodSeconds:    5,
+						SuccessThreshold: 1,
+						TimeoutSeconds:   1,
+					},
+					ReadinessProbe: &v1.Probe{
+						ProbeHandler: v1.ProbeHandler{
+							TCPSocket: &v1.TCPSocketAction{
+								Port: intstr.FromInt(serverPort),
+							},
+						},
+						FailureThreshold:    3,
+						InitialDelaySeconds: 10,
+						PeriodSeconds:       5,
+						TimeoutSeconds:      1,
+					},
+				},
+			},
+			Volumes: []v1.Volume{
+				{
+					Name:         "data-volume",
+					VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
+				},
+			},
+		},
+	}, nil
+}
+
+// DeleteSlaves cleans up the slave pods after the test execution
 func (client *Client) DeleteSlaves(ctx context.Context, slaveNameIpMap map[string]string) error {
 	for slaveName := range slaveNameIpMap {
 		output.PrintLog(fmt.Sprintf("Deleting slave %v", slaveName))
@@ -132,11 +218,3 @@ func (client *Client) DeleteSlaves(ctx context.Context, slaveNameIpMap map[strin
 	}
 	return nil
 }
-
-func (client *Client) GetSlavesIpString(podNameIpMap map[string]string) string {
-	podIps := []string{}
-	for _, ip := range podNameIpMap {
-		podIps = append(podIps, ip)
-	}
-	return strings.Join(podIps, ",")
-}
diff --git a/contrib/executor/jmeterd/pkg/slaves/utils.go b/contrib/executor/jmeterd/pkg/slaves/utils.go
index d20302da8f5..d349d3176d7 100644
--- a/contrib/executor/jmeterd/pkg/slaves/utils.go
+++ b/contrib/executor/jmeterd/pkg/slaves/utils.go
@@ -2,18 +2,18 @@ package slaves
 
 import (
 	"context"
-	"encoding/json"
 	"fmt"
 	"strconv"
+	"strings"
 
-	"github.com/kubeshop/testkube/pkg/api/v1/testkube"
-	"github.com/kubeshop/testkube/pkg/envs"
-	"github.com/kubeshop/testkube/pkg/executor/output"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
+
+	"github.com/kubeshop/testkube/pkg/api/v1/testkube"
+	"github.com/kubeshop/testkube/pkg/envs"
+	"github.com/kubeshop/testkube/pkg/executor/output"
 )
 
 const (
@@ -100,92 +100,6 @@ func getSlaveConfigurationEnv(slaveEnv map[string]testkube.Variable) []v1.EnvVar
 	return envVars
 }
 
-func getSlavePodConfiguration(testName string, runnerExecution testkube.Execution, envVariables map[string]testkube.Variable, envParams envs.Params) (*v1.Pod, error) {
-	runnerExecutionStr, err := json.Marshal(runnerExecution)
-	if err != nil {
-		return nil, err
-	}
-
-	podName := fmt.Sprintf("%s-jmeter-slave", testName)
-	if err != nil {
-		return nil, err
-	}
-	return &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: podName,
-		},
-		Spec: v1.PodSpec{
-			RestartPolicy: v1.RestartPolicyAlways,
-			InitContainers: []v1.Container{
-				{
-					Name:            "init",
-					Image:           "kubeshop/testkube-init-executor:1.14.3",
-					Command:         []string{"/bin/runner", string(runnerExecutionStr)},
-					Env:             getSlaveRunnerEnv(envParams, runnerExecution),
-					ImagePullPolicy: v1.PullIfNotPresent,
-					VolumeMounts: []v1.VolumeMount{
-						{
-							MountPath: "/data",
-							Name:      "data-volume",
-						},
-					},
-				},
-			},
-			Containers: []v1.Container{
-				{
-					Name:            "main",
-					Image:           "kubeshop/testkube-jmeterd-slaves:999.0.0",
-					Env:             getSlaveConfigurationEnv(envVariables),
-					ImagePullPolicy: v1.PullIfNotPresent,
-					Ports: []v1.ContainerPort{
-						{
-							ContainerPort: serverPort,
-							Name:          "server-port",
-						}, {
-							ContainerPort: localPort,
-							Name:          "local-port",
-						},
-					},
-					VolumeMounts: []v1.VolumeMount{
-						{
-							MountPath: "/data",
-							Name:      "data-volume",
-						},
-					},
-					LivenessProbe: &v1.Probe{
-						ProbeHandler: v1.ProbeHandler{
-							TCPSocket: &v1.TCPSocketAction{
-								Port: intstr.FromInt(serverPort),
-							},
-						},
-						FailureThreshold: 3,
-						PeriodSeconds:    5,
-						SuccessThreshold: 1,
-						TimeoutSeconds:   1,
-					},
-					ReadinessProbe: &v1.Probe{
-						ProbeHandler: v1.ProbeHandler{
-							TCPSocket: &v1.TCPSocketAction{
-								Port: intstr.FromInt(serverPort),
-							},
-						},
-						FailureThreshold:    3,
-						InitialDelaySeconds: 10,
-						PeriodSeconds:       5,
-						TimeoutSeconds:      1,
-					},
-				},
-			},
-			Volumes: []v1.Volume{
-				{
-					Name:         "data-volume",
-					VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
-				},
-			},
-		},
-	}, nil
-}
-
 func isPodReady(ctx context.Context, c kubernetes.Interface, podName, namespace string) wait.ConditionFunc {
 	return func() (bool, error) {
 		pod, err := c.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
@@ -218,3 +132,21 @@ func getSlavesCount(count testkube.Variable) (int, error) {
 	}
 	return rplicaCount, err
 }
+
+func GetSlavesIpString(podNameIpMap map[string]string) string {
+	podIps := []string{}
+	for _, ip := range podNameIpMap {
+		podIps = append(podIps, ip)
+	}
+	return strings.Join(podIps, ",")
+}
+
+func ValidateAndGetSlavePodName(testName string, executionId string, currentSlaveCount int) string {
+	slavePodName := fmt.Sprintf("%s-slave-%v-%s", testName, currentSlaveCount, executionId)
+	if len(slavePodName) > 64 {
+		// Use the first 20 chars of the test name if the pod name exceeds 64 chars
+		shortTestName := testName[:20]
+		slavePodName = fmt.Sprintf("%s-slave-%v-%s", shortTestName, currentSlaveCount, executionId)
+	}
+	return slavePodName
+}
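
Usage note (illustrative, not part of the patch): a minimal, self-contained sketch of the two new package-level helpers, with both function bodies copied from the diff above. The test name, execution id, and pod IPs below are hypothetical.

package main

import (
	"fmt"
	"strings"
)

// Copied from the patch: joins the slave pod IPs into the comma-separated
// list that runner.go passes to JMeter via the -R flag.
func GetSlavesIpString(podNameIpMap map[string]string) string {
	podIps := []string{}
	for _, ip := range podNameIpMap {
		podIps = append(podIps, ip)
	}
	return strings.Join(podIps, ",")
}

// Copied from the patch: builds the slave pod name and, when the full name
// would exceed 64 chars, falls back to the first 20 chars of the test name.
func ValidateAndGetSlavePodName(testName string, executionId string, currentSlaveCount int) string {
	slavePodName := fmt.Sprintf("%s-slave-%v-%s", testName, currentSlaveCount, executionId)
	if len(slavePodName) > 64 {
		// Use the first 20 chars of the test name if the pod name exceeds 64 chars
		shortTestName := testName[:20]
		slavePodName = fmt.Sprintf("%s-slave-%v-%s", shortTestName, currentSlaveCount, executionId)
	}
	return slavePodName
}

func main() {
	// Hypothetical pod-name -> IP map, shaped like what CreateSlaves returns.
	slaveIPs := map[string]string{
		"smoke-slave-1-650a1b2c3d4e5f6a7b8c9d0e": "10.244.1.12",
		"smoke-slave-2-650a1b2c3d4e5f6a7b8c9d0e": "10.244.1.13",
	}
	// Map iteration order is unspecified, so the IP order may vary between runs.
	fmt.Println(GetSlavesIpString(slaveIPs)) // e.g. 10.244.1.12,10.244.1.13

	// A short test name fits as-is (38 chars in total here).
	fmt.Println(ValidateAndGetSlavePodName("smoke", "650a1b2c3d4e5f6a7b8c9d0e", 1))
	// A long one (74 chars in total) is trimmed to the first 20 chars of the test name.
	fmt.Println(ValidateAndGetSlavePodName("distributed-load-profile-regression-suite", "650a1b2c3d4e5f6a7b8c9d0e", 1))
}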