From 7ead9dd324d83ae10a325671245d92bb8dc8d77b Mon Sep 17 00:00:00 2001 From: Jayendra Parsai Date: Tue, 13 Jan 2026 13:14:34 +0530 Subject: [PATCH 1/2] chore: add e2e tests for agent Assisted by: Cursor Signed-off-by: Jayendra Parsai --- Makefile | 2 +- .../e2e/ginkgo/fixture/agent/fixture.go | 564 ++++++++++++++ test/openshift/e2e/ginkgo/fixture/fixture.go | 31 + ...51_validate_argocd_agent_principal_test.go | 693 ++++++++++++++++++ .../1-052_validate_argocd_agent_agent_test.go | 448 +++++++++++ ...e_argocd_agent_principal_connected_test.go | 624 ++++++++++++++++ 6 files changed, 2361 insertions(+), 1 deletion(-) create mode 100644 test/openshift/e2e/ginkgo/fixture/agent/fixture.go create mode 100644 test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go create mode 100644 test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go create mode 100644 test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go diff --git a/Makefile b/Makefile index 5a038a1e2..f9b9bc914 100644 --- a/Makefile +++ b/Makefile @@ -211,7 +211,7 @@ build: generate fmt vet ## Build manager binary. .PHONY: run run: manifests generate fmt vet ## Run a controller from your host. 
- CLUSTER_SCOPED_ARGO_ROLLOUTS_NAMESPACES=argo-rollouts,test-rom-ns-1,rom-ns-1,openshift-gitops ARGOCD_CLUSTER_CONFIG_NAMESPACES="openshift-gitops, argocd-e2e-cluster-config, argocd-test-impersonation-1-046, argocd-agent-principal-1-051, argocd-agent-agent-1-052, appset-argocd, appset-old-ns, appset-new-ns" REDIS_CONFIG_PATH="build/redis" go run ./cmd/main.go + CLUSTER_SCOPED_ARGO_ROLLOUTS_NAMESPACES=argo-rollouts,test-rom-ns-1,rom-ns-1,openshift-gitops ARGOCD_CLUSTER_CONFIG_NAMESPACES="openshift-gitops, argocd-e2e-cluster-config, argocd-test-impersonation-1-046, argocd-agent-principal-1-051, argocd-agent-agent-1-052, appset-argocd, appset-old-ns, appset-new-ns, ns-hosting-principal, ns-hosting-managed-agent, ns-hosting-autonomous-agent" REDIS_CONFIG_PATH="build/redis" go run ./cmd/main.go .PHONY: docker-build docker-build: test ## Build container image with the manager. diff --git a/test/openshift/e2e/ginkgo/fixture/agent/fixture.go b/test/openshift/e2e/ginkgo/fixture/agent/fixture.go new file mode 100644 index 000000000..2addf6ebc --- /dev/null +++ b/test/openshift/e2e/ginkgo/fixture/agent/fixture.go @@ -0,0 +1,564 @@ +package argocdagentprincipal + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "math/big" + "net" + "sort" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + argov1beta1api "github.com/argoproj-labs/argocd-operator/api/v1beta1" + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture" + k8sFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/k8s" + osFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/os" + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" +) + +type PrincipalResources struct { + PrincipalNamespaceName string + ArgoCDAgentPrincipalName string + ArgoCDName string + ServiceAccount *corev1.ServiceAccount + Role *rbacv1.Role + RoleBinding *rbacv1.RoleBinding + ClusterRole *rbacv1.ClusterRole + ClusterRoleBinding *rbacv1.ClusterRoleBinding + PrincipalDeployment *appsv1.Deployment + PrincipalRoute *routev1.Route + ServicesToDelete []string +} + +type PrincipalSecretsConfig struct { + PrincipalNamespaceName string + PrincipalServiceName string + ResourceProxyServiceName string + JWTSecretName string + PrincipalTLSSecretName string + RootCASecretName string + ResourceProxyTLSSecretName string + AdditionalPrincipalSANs []string + AdditionalResourceProxySANs []string +} + +type AgentSecretsConfig struct { + AgentNamespace *corev1.Namespace + PrincipalNamespaceName string + PrincipalRootCASecretName string + AgentRootCASecretName string + ClientTLSSecretName string + ClientCommonName string + ClientDNSNames []string +} + +type ClusterRegistrationSecretConfig struct { + PrincipalNamespaceName string + AgentNamespaceName string + AgentName string + ResourceProxyServiceName string + ResourceProxyPort int32 + PrincipalRootCASecretName string + AgentTLSSecretName string + Server string +} + +type 
AgentSecretNames struct { + JWTSecretName string + PrincipalTLSSecretName string + RootCASecretName string + ResourceProxyTLSSecretName string + RedisInitialPasswordSecretName string +} + +type VerifyExpectedResourcesExistParams struct { + Namespace *corev1.Namespace + ArgoCDAgentPrincipalName string + ArgoCDName string + ServiceAccount *corev1.ServiceAccount + Role *rbacv1.Role + RoleBinding *rbacv1.RoleBinding + ClusterRole *rbacv1.ClusterRole + ClusterRoleBinding *rbacv1.ClusterRoleBinding + PrincipalDeployment *appsv1.Deployment + PrincipalRoute *routev1.Route + SecretNames AgentSecretNames + ServiceNames []string + DeploymentNames []string + ExpectRoute *bool +} + +func VerifyResourcesDeleted(resources PrincipalResources) { + + By("verifying resources are deleted for principal pod") + + Eventually(resources.ServiceAccount).Should(k8sFixture.NotExistByName()) + Eventually(resources.Role).Should(k8sFixture.NotExistByName()) + Eventually(resources.RoleBinding).Should(k8sFixture.NotExistByName()) + Eventually(resources.ClusterRole).Should(k8sFixture.NotExistByName()) + Eventually(resources.ClusterRoleBinding).Should(k8sFixture.NotExistByName()) + Eventually(resources.PrincipalDeployment).Should(k8sFixture.NotExistByName()) + + for _, serviceName := range resources.ServicesToDelete { + if serviceName == "" { + continue + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: resources.PrincipalNamespaceName, + }, + } + Eventually(service).Should(k8sFixture.NotExistByName()) + } + + if fixture.RunningOnOpenShift() { + Eventually(resources.PrincipalRoute).Should(k8sFixture.NotExistByName()) + } +} + +func CreateRequiredSecrets(cfg PrincipalSecretsConfig) { + k8sClient, _ := utils.GetE2ETestKubeClient() + ctx := context.Background() + + By("creating required secrets for principal pod") + + jwtKey := generateJWTSigningKey() + jwtSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: cfg.JWTSecretName, + Namespace: 
cfg.PrincipalNamespaceName, + }, + Data: map[string][]byte{ + "jwt.key": jwtKey, + }, + } + Expect(k8sClient.Create(ctx, jwtSecret)).To(Succeed()) + + caKey, caCert, caCertPEM := generateCertificateAuthority() + caKeyPEM := encodePrivateKeyToPEM(caKey) + + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: cfg.RootCASecretName, + Namespace: cfg.PrincipalNamespaceName, + }, + Type: corev1.SecretTypeTLS, + Data: map[string][]byte{ + "tls.crt": caCertPEM, + "tls.key": caKeyPEM, + "ca.crt": caCertPEM, + }, + } + Expect(k8sClient.Create(ctx, caSecret)).To(Succeed()) + + principalDNS, principalIPs := aggregateSANs(cfg.PrincipalNamespaceName, cfg.PrincipalServiceName, cfg.AdditionalPrincipalSANs) + principalCertPEM, principalKeyPEM := issueCertificate(caCert, caKey, certificateRequest{ + CommonName: cfg.PrincipalServiceName, + DNSNames: principalDNS, + IPAddresses: principalIPs, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) + createTLSSecret(ctx, k8sClient, cfg.PrincipalNamespaceName, cfg.PrincipalTLSSecretName, principalCertPEM, principalKeyPEM, caCertPEM) + + resourceProxyDNS, resourceProxyIPs := aggregateSANs(cfg.PrincipalNamespaceName, cfg.ResourceProxyServiceName, cfg.AdditionalResourceProxySANs) + resourceProxyCertPEM, resourceProxyKeyPEM := issueCertificate(caCert, caKey, certificateRequest{ + CommonName: cfg.ResourceProxyServiceName, + DNSNames: resourceProxyDNS, + IPAddresses: resourceProxyIPs, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) + createTLSSecret(ctx, k8sClient, cfg.PrincipalNamespaceName, cfg.ResourceProxyTLSSecretName, resourceProxyCertPEM, resourceProxyKeyPEM, caCertPEM) +} + +func CreateRequiredAgentSecrets(cfg AgentSecretsConfig) { + k8sClient, _ := utils.GetE2ETestKubeClient() + ctx := context.Background() + + agentRootCASecretName := cfg.AgentRootCASecretName + if agentRootCASecretName == "" { + agentRootCASecretName = cfg.PrincipalRootCASecretName + } + + var principalCASecret 
corev1.Secret + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: cfg.PrincipalRootCASecretName, + Namespace: cfg.PrincipalNamespaceName, + }, &principalCASecret)).To(Succeed()) + + caCertPEM := principalCASecret.Data["tls.crt"] + Expect(caCertPEM).ToNot(BeEmpty(), "CA certificate must be present in principal namespace secret") + caKeyPEM := principalCASecret.Data["tls.key"] + Expect(caKeyPEM).ToNot(BeEmpty(), "CA private key must be present in principal namespace secret") + + caCert := parseCertificate(caCertPEM) + caKey := parsePrivateKey(caKeyPEM) + + clientDNS, clientIPs := aggregateClientSANs(cfg.ClientDNSNames) + clientCertPEM, clientKeyPEM := issueCertificate(caCert, caKey, certificateRequest{ + CommonName: cfg.ClientCommonName, + DNSNames: clientDNS, + IPAddresses: clientIPs, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }) + + createTLSSecret(ctx, k8sClient, cfg.AgentNamespace.Name, cfg.ClientTLSSecretName, clientCertPEM, clientKeyPEM, caCertPEM) + + // Propagate CA certificate without private key to the agent namespace + propagatedCASecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentRootCASecretName, + Namespace: cfg.AgentNamespace.Name, + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "tls.crt": caCertPEM, + "ca.crt": caCertPEM, + }, + } + Expect(k8sClient.Create(ctx, propagatedCASecret)).To(Succeed()) +} + +func CreateClusterRegistrationSecret(cfg ClusterRegistrationSecretConfig) { + k8sClient, _ := utils.GetE2ETestKubeClient() + ctx := context.Background() + + server := cfg.Server + if server == "" { + port := cfg.ResourceProxyPort + if port == 0 { + port = 9090 + } + host := fmt.Sprintf("%s.%s.svc", cfg.ResourceProxyServiceName, cfg.PrincipalNamespaceName) + server = fmt.Sprintf("https://%s:%d?agentName=%s", host, port, cfg.AgentName) + } + + var caSecret corev1.Secret + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: cfg.PrincipalRootCASecretName, + Namespace: 
cfg.PrincipalNamespaceName, + }, &caSecret)).To(Succeed()) + + caData := caSecret.Data["ca.crt"] + if len(caData) == 0 { + caData = caSecret.Data["tls.crt"] + } + Expect(caData).ToNot(BeEmpty(), "CA certificate missing from principal root secret") + + var agentTLSSecret corev1.Secret + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: cfg.AgentTLSSecretName, + Namespace: cfg.AgentNamespaceName, + }, &agentTLSSecret)).To(Succeed()) + + clientCert := agentTLSSecret.Data["tls.crt"] + Expect(clientCert).ToNot(BeEmpty(), "agent TLS certificate missing") + clientKey := agentTLSSecret.Data["tls.key"] + Expect(clientKey).ToNot(BeEmpty(), "agent TLS private key missing") + + configPayload, err := json.Marshal(map[string]any{ + "tlsClientConfig": map[string]any{ + "insecure": false, + "certData": base64.StdEncoding.EncodeToString(clientCert), + "keyData": base64.StdEncoding.EncodeToString(clientKey), + "caData": base64.StdEncoding.EncodeToString(caData), + }, + }) + Expect(err).ToNot(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cluster-%s", cfg.AgentName), + Namespace: cfg.PrincipalNamespaceName, + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "argocd-agent.argoproj-labs.io/agent-name": cfg.AgentName, + }, + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "name": []byte(cfg.AgentName), + "server": []byte(server), + "config": configPayload, + }, + } + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) +} + +func VerifyExpectedResourcesExist(params VerifyExpectedResourcesExistParams) { + shouldExpectRoute := true + if params.ExpectRoute != nil { + shouldExpectRoute = *params.ExpectRoute + } + + By("verifying expected resources exist") + + if params.SecretNames.RedisInitialPasswordSecretName != "" { + Eventually(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: params.SecretNames.RedisInitialPasswordSecretName, + Namespace: params.Namespace.Name, + }, + }, "30s", 
"2s").Should(k8sFixture.ExistByName()) + } + + Eventually(params.ServiceAccount).Should(k8sFixture.ExistByName()) + Eventually(params.Role).Should(k8sFixture.ExistByName()) + Eventually(params.RoleBinding).Should(k8sFixture.ExistByName()) + Eventually(params.ClusterRole).Should(k8sFixture.ExistByName()) + Eventually(params.ClusterRoleBinding).Should(k8sFixture.ExistByName()) + + for _, serviceName := range params.ServiceNames { + if serviceName == "" { + continue + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: params.Namespace.Name, + }, + } + Eventually(service).Should(k8sFixture.ExistByName(), "Service '%s' should exist in namespace '%s'", serviceName, params.Namespace.Name) + + if serviceName != params.ArgoCDAgentPrincipalName { + Expect(string(service.Spec.Type)).To(Equal("ClusterIP"), "Service '%s' should have ClusterIP type", serviceName) + } + } + + if shouldExpectRoute && fixture.RunningOnOpenShift() { + Eventually(params.PrincipalRoute).Should(k8sFixture.ExistByName()) + } + + for _, deploymentName := range params.DeploymentNames { + if deploymentName == "" { + continue + } + depl := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentName, + Namespace: params.Namespace.Name, + }, + } + Eventually(depl).Should(k8sFixture.ExistByName(), "Deployment '%s' should exist in namespace '%s'", deploymentName, params.Namespace.Name) + } + + Eventually(params.PrincipalDeployment).Should(k8sFixture.ExistByName()) + Eventually(params.PrincipalDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/component", string(argov1beta1api.AgentComponentTypePrincipal))) + Eventually(params.PrincipalDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/managed-by", params.ArgoCDName)) + Eventually(params.PrincipalDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/name", params.ArgoCDAgentPrincipalName)) + 
Eventually(params.PrincipalDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/part-of", "argocd-agent")) +} + +func VerifyLogs(deploymentName, namespace string, requiredMessages []string) { + Eventually(func() bool { + logOutput, err := osFixture.ExecCommandWithOutputParam(false, "kubectl", "logs", + "deployment/"+deploymentName, "-n", namespace, "--tail=200") + if err != nil { + GinkgoWriter.Println("Error getting agent logs: ", err) + return false + } + + for _, msg := range requiredMessages { + if !strings.Contains(logOutput, msg) { + GinkgoWriter.Println("Expected agent log not found:", msg) + return false + } + } + return true + }, "120s", "5s").Should(BeTrue(), "Agent should process cluster cache updates") +} + +type certificateRequest struct { + CommonName string + DNSNames []string + IPAddresses []net.IP + ExtKeyUsage []x509.ExtKeyUsage +} + +func generateCertificateAuthority() (*rsa.PrivateKey, *x509.Certificate, []byte) { + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + Expect(err).ToNot(HaveOccurred()) + + template := x509.Certificate{ + SerialNumber: randomSerialNumber(), + Subject: pkix.Name{CommonName: "argocd-agent-ca"}, + NotBefore: time.Now().Add(-1 * time.Hour), + NotAfter: time.Now().Add(24 * time.Hour), + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + BasicConstraintsValid: true, + IsCA: true, + } + + certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) + Expect(err).ToNot(HaveOccurred()) + + cert, err := x509.ParseCertificate(certDER) + Expect(err).ToNot(HaveOccurred()) + + return privateKey, cert, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) +} + +func issueCertificate(caCert *x509.Certificate, caKey *rsa.PrivateKey, req certificateRequest) ([]byte, []byte) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + Expect(err).ToNot(HaveOccurred()) + + template := x509.Certificate{ + SerialNumber: randomSerialNumber(), + Subject: 
pkix.Name{ + CommonName: req.CommonName, + }, + NotBefore: time.Now().Add(-1 * time.Hour), + NotAfter: time.Now().Add(24 * time.Hour), + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: req.ExtKeyUsage, + DNSNames: req.DNSNames, + IPAddresses: req.IPAddresses, + } + + certDER, err := x509.CreateCertificate(rand.Reader, &template, caCert, &key.PublicKey, caKey) + Expect(err).ToNot(HaveOccurred()) + + certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) + keyPEM := encodePrivateKeyToPEM(key) + + return certPEM, keyPEM +} + +func createTLSSecret(ctx context.Context, k8sClient client.Client, namespace, secretName string, certPEM, keyPEM, caCertPEM []byte) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + Type: corev1.SecretTypeTLS, + Data: map[string][]byte{ + "tls.crt": certPEM, + "tls.key": keyPEM, + "ca.crt": caCertPEM, + }, + } + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) +} + +func generateJWTSigningKey() []byte { + key, err := rsa.GenerateKey(rand.Reader, 2048) + Expect(err).ToNot(HaveOccurred()) + + keyDER, err := x509.MarshalPKCS8PrivateKey(key) + Expect(err).ToNot(HaveOccurred()) + + return pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyDER}) +} + +func encodePrivateKeyToPEM(key *rsa.PrivateKey) []byte { + keyDER, err := x509.MarshalPKCS8PrivateKey(key) + Expect(err).ToNot(HaveOccurred()) + + return pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyDER}) +} + +func parseCertificate(certPEM []byte) *x509.Certificate { + block, _ := pem.Decode(certPEM) + Expect(block).ToNot(BeNil(), "invalid certificate data") + cert, err := x509.ParseCertificate(block.Bytes) + Expect(err).ToNot(HaveOccurred()) + return cert +} + +func parsePrivateKey(keyPEM []byte) *rsa.PrivateKey { + block, _ := pem.Decode(keyPEM) + Expect(block).ToNot(BeNil(), "invalid private key data") + parsedKey, err := 
x509.ParsePKCS8PrivateKey(block.Bytes) + Expect(err).ToNot(HaveOccurred()) + + privateKey, ok := parsedKey.(*rsa.PrivateKey) + Expect(ok).To(BeTrue(), "private key is not RSA") + return privateKey +} + +func randomSerialNumber() *big.Int { + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + Expect(err).ToNot(HaveOccurred()) + return serialNumber +} + +func aggregateSANs(namespace, serviceName string, additional []string) ([]string, []net.IP) { + defaults := buildDefaultSANs(serviceName, namespace) + return aggregateSANLists(defaults, additional) +} + +func aggregateClientSANs(additional []string) ([]string, []net.IP) { + return aggregateSANLists(nil, additional) +} + +func aggregateSANLists(defaults, additional []string) ([]string, []net.IP) { + dnsSet := map[string]struct{}{} + ipSet := map[string]struct{}{} + var dnsNames []string + var ipAddresses []net.IP + + addEntry := func(entry string) { + entry = strings.TrimSpace(entry) + if entry == "" { + return + } + if ip := net.ParseIP(entry); ip != nil { + key := ip.String() + if _, found := ipSet[key]; !found { + ipSet[key] = struct{}{} + ipAddresses = append(ipAddresses, ip) + } + return + } + if _, found := dnsSet[entry]; !found { + dnsSet[entry] = struct{}{} + dnsNames = append(dnsNames, entry) + } + } + + for _, entry := range defaults { + addEntry(entry) + } + for _, entry := range additional { + addEntry(entry) + } + + sort.Strings(dnsNames) + sort.Slice(ipAddresses, func(i, j int) bool { + return bytes.Compare(ipAddresses[i], ipAddresses[j]) < 0 + }) + + return dnsNames, ipAddresses +} + +func buildDefaultSANs(serviceName, namespace string) []string { + if serviceName == "" || namespace == "" { + return nil + } + return []string{ + serviceName, + fmt.Sprintf("%s.%s", serviceName, namespace), + fmt.Sprintf("%s.%s.svc", serviceName, namespace), + fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, namespace), + } +} diff --git 
a/test/openshift/e2e/ginkgo/fixture/fixture.go b/test/openshift/e2e/ginkgo/fixture/fixture.go index 13219e730..edefcd09b 100644 --- a/test/openshift/e2e/ginkgo/fixture/fixture.go +++ b/test/openshift/e2e/ginkgo/fixture/fixture.go @@ -999,3 +999,34 @@ func outputPodLog(podSubstring string) { func IsUpstreamOperatorTests() bool { return false // This function should return true if running from argocd-operator repo, false if running from gitops-operator repo. This is to distinguish between tests in upstream argocd-operator and downstream gitops-operator repos. } + +// Create a namespace 'name' that is managed by a cluster-scoped ArgoCD instance, via managed-by-cluster-argocd label. +func CreateClusterScopedManagedNamespace(name string, managedByArgoCDInstance string) *corev1.Namespace { + k8sClient, _ := utils.GetE2ETestKubeClient() + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} + + // If the Namespace already exists, delete it first + if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(ns), ns); err == nil { + // Namespace exists, so delete it first + Expect(deleteNamespaceAndVerify(context.Background(), ns.Name, k8sClient)).To(Succeed()) + } + + ns = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + E2ETestLabelsKey: E2ETestLabelsValue, + "argocd.argoproj.io/managed-by-cluster-argocd": managedByArgoCDInstance, + }, + }} + + Expect(k8sClient.Create(context.Background(), ns)).To(Succeed()) + + return ns + +} + +func CreateClusterScopedManagedNamespaceWithCleanupFunc(name string, managedByArgoCDInstance string) (*corev1.Namespace, func()) { + ns := CreateClusterScopedManagedNamespace(name, managedByArgoCDInstance) + return ns, nsDeletionFunc(ns) +} diff --git a/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go b/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go new file mode 100644 index 000000000..1daf6518e --- 
/dev/null +++ b/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go @@ -0,0 +1,693 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sequential + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + argov1beta1api "github.com/argoproj-labs/argocd-operator/api/v1beta1" + "github.com/argoproj-labs/argocd-operator/controllers/argocdagent" + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture" + agentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/agent" + argocdFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/argocd" + deploymentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/deployment" + k8sFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/k8s" + fixtureUtils "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" +) + +var _ = Describe("GitOps Operator Sequential E2E Tests", func() { + Context("1-051_validate_argocd_agent_principal", func() { + + var ( + k8sClient client.Client + ctx context.Context + argoCD 
*argov1beta1api.ArgoCD + ns *corev1.Namespace + cleanupFunc func() + serviceAccount *corev1.ServiceAccount + role *rbacv1.Role + roleBinding *rbacv1.RoleBinding + clusterRole *rbacv1.ClusterRole + clusterRoleBinding *rbacv1.ClusterRoleBinding + serviceNames []string + deploymentNames []string + principalDeployment *appsv1.Deployment + expectedEnvVariables map[string]string + secretNames agentFixture.AgentSecretNames + principalRoute *routev1.Route + resourceProxyServiceName string + principalResources agentFixture.PrincipalResources + ) + + BeforeEach(func() { + fixture.EnsureSequentialCleanSlate() + fixture.SetEnvInOperatorSubscriptionOrDeployment("ARGOCD_CLUSTER_CONFIG_NAMESPACES", "openshift-gitops, argocd-agent-principal-1-051") + + k8sClient, _ = fixtureUtils.GetE2ETestKubeClient() + ctx = context.Background() + ns, cleanupFunc = fixture.CreateNamespaceWithCleanupFunc("argocd-agent-principal-1-051") + + // Define ArgoCD CR with principal enabled + argoCD = &argov1beta1api.ArgoCD{ + ObjectMeta: metav1.ObjectMeta{ + Name: argoCDAgentInstanceNamePrincipal, + Namespace: ns.Name, + }, + Spec: argov1beta1api.ArgoCDSpec{ + Controller: argov1beta1api.ArgoCDApplicationControllerSpec{ + Enabled: ptr.To(false), + }, + ArgoCDAgent: &argov1beta1api.ArgoCDAgentSpec{ + Principal: &argov1beta1api.PrincipalSpec{ + Enabled: ptr.To(true), + Auth: "mtls:CN=([^,]+)", + LogLevel: "info", + Namespace: &argov1beta1api.PrincipalNamespaceSpec{ + AllowedNamespaces: []string{ + "*", + }, + }, + TLS: &argov1beta1api.PrincipalTLSSpec{ + InsecureGenerate: ptr.To(true), + }, + JWT: &argov1beta1api.PrincipalJWTSpec{ + InsecureGenerate: ptr.To(true), + }, + Server: &argov1beta1api.PrincipalServerSpec{ + KeepAliveMinInterval: "30s", + }, + }, + }, + SourceNamespaces: []string{ + "agent-managed", + "agent-autonomous", + }, + }, + } + + // Define required resources for principal pod + serviceAccount = &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgentPrincipal, + 
Namespace: ns.Name, + }, + } + + role = &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgentPrincipal, + Namespace: ns.Name, + }, + } + + roleBinding = &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgentPrincipal, + Namespace: ns.Name, + }, + } + + clusterRole = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-principal", argoCDAgentInstanceNamePrincipal, ns.Name), + }, + } + + clusterRoleBinding = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-principal", argoCDAgentInstanceNamePrincipal, ns.Name), + }, + } + + // List required secrets for principal pod + secretNames = agentFixture.AgentSecretNames{ + JWTSecretName: agentJWTSecretName, + PrincipalTLSSecretName: agentPrincipalTLSSecretName, + RootCASecretName: agentRootCASecretName, + ResourceProxyTLSSecretName: agentResourceProxyTLSSecretName, + RedisInitialPasswordSecretName: fmt.Sprintf("%s-redis-initial-password", argoCDAgentInstanceNamePrincipal), + } + + resourceProxyServiceName = fmt.Sprintf("%s-agent-principal-resource-proxy", argoCDAgentInstanceNamePrincipal) + serviceNames = []string{ + deploymentNameAgentPrincipal, + fmt.Sprintf("%s-agent-principal-metrics", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-redis", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-repo-server", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-server", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-agent-principal-redisproxy", argoCDAgentInstanceNamePrincipal), + resourceProxyServiceName, + fmt.Sprintf("%s-agent-principal-healthz", argoCDAgentInstanceNamePrincipal), + } + deploymentNames = []string{fmt.Sprintf("%s-redis", argoCDAgentInstanceNamePrincipal), fmt.Sprintf("%s-repo-server", argoCDAgentInstanceNamePrincipal), fmt.Sprintf("%s-server", argoCDAgentInstanceNamePrincipal)} + + principalDeployment = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
deploymentNameAgentPrincipal, + Namespace: ns.Name, + }, + } + + principalRoute = &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-agent-principal", argoCDAgentInstanceNamePrincipal), + Namespace: ns.Name, + }, + } + + // List environment variables with expected values for the principal deployment + expectedEnvVariables = map[string]string{ + argocdagent.EnvArgoCDPrincipalLogLevel: "info", + argocdagent.EnvArgoCDPrincipalNamespace: ns.Name, + argocdagent.EnvArgoCDPrincipalAllowedNamespaces: "*", + argocdagent.EnvArgoCDPrincipalNamespaceCreateEnable: "false", + argocdagent.EnvArgoCDPrincipalNamespaceCreatePattern: "", + argocdagent.EnvArgoCDPrincipalNamespaceCreateLabels: "", + argocdagent.EnvArgoCDPrincipalTLSServerAllowGenerate: "true", + argocdagent.EnvArgoCDPrincipalJWTAllowGenerate: "true", + argocdagent.EnvArgoCDPrincipalAuth: "mtls:CN=([^,]+)", + argocdagent.EnvArgoCDPrincipalEnableResourceProxy: "true", + argocdagent.EnvArgoCDPrincipalKeepAliveMinInterval: "30s", + argocdagent.EnvArgoCDPrincipalRedisServerAddress: fmt.Sprintf("%s-%s:%d", argoCDAgentInstanceNamePrincipal, "redis", ArgoCDDefaultRedisPort), + argocdagent.EnvArgoCDPrincipalRedisCompressionType: "gzip", + argocdagent.EnvArgoCDPrincipalLogFormat: "text", + argocdagent.EnvArgoCDPrincipalEnableWebSocket: "false", + argocdagent.EnvArgoCDPrincipalTLSSecretName: agentPrincipalTLSSecretName, + argocdagent.EnvArgoCDPrincipalTLSServerRootCASecretName: agentRootCASecretName, + argocdagent.EnvArgoCDPrincipalResourceProxySecretName: agentResourceProxyTLSSecretName, + argocdagent.EnvArgoCDPrincipalResourceProxyCaSecretName: agentRootCASecretName, + argocdagent.EnvArgoCDPrincipalJwtSecretName: agentJWTSecretName, + } + + principalResources = agentFixture.PrincipalResources{ + PrincipalNamespaceName: ns.Name, + ArgoCDAgentPrincipalName: deploymentNameAgentPrincipal, + ArgoCDName: argoCDAgentInstanceNamePrincipal, + ServiceAccount: serviceAccount, + Role: role, + RoleBinding: roleBinding, 
+ ClusterRole: clusterRole, + ClusterRoleBinding: clusterRoleBinding, + PrincipalDeployment: principalDeployment, + PrincipalRoute: principalRoute, + ServicesToDelete: []string{ + deploymentNameAgentPrincipal, + fmt.Sprintf("%s-agent-principal-metrics", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-agent-principal-redisproxy", argoCDAgentInstanceNamePrincipal), + resourceProxyServiceName, + fmt.Sprintf("%s-agent-principal-healthz", argoCDAgentInstanceNamePrincipal), + }, + } + }) + + AfterEach(func() { + By("Cleanup cluster-scoped resources") + _ = k8sClient.Delete(ctx, clusterRole) + _ = k8sClient.Delete(ctx, clusterRoleBinding) + + By("Cleanup namespace") + if cleanupFunc != nil { + cleanupFunc() + } + }) + + createRequiredSecrets := func(namespace *corev1.Namespace, additionalPrincipalSANs ...string) { + agentFixture.CreateRequiredSecrets(agentFixture.PrincipalSecretsConfig{ + PrincipalNamespaceName: namespace.Name, + PrincipalServiceName: deploymentNameAgentPrincipal, + ResourceProxyServiceName: resourceProxyServiceName, + JWTSecretName: secretNames.JWTSecretName, + PrincipalTLSSecretName: secretNames.PrincipalTLSSecretName, + RootCASecretName: secretNames.RootCASecretName, + ResourceProxyTLSSecretName: secretNames.ResourceProxyTLSSecretName, + AdditionalPrincipalSANs: additionalPrincipalSANs, + }) + } + + verifyExpectedResourcesExist := func(namespace *corev1.Namespace, expectRoute ...bool) { + var expectRoutePtr *bool + if len(expectRoute) > 0 { + expectRoutePtr = ptr.To(expectRoute[0]) + } + + agentFixture.VerifyExpectedResourcesExist(agentFixture.VerifyExpectedResourcesExistParams{ + Namespace: namespace, + ArgoCDAgentPrincipalName: deploymentNameAgentPrincipal, + ArgoCDName: argoCDAgentInstanceNamePrincipal, + ServiceAccount: serviceAccount, + Role: role, + RoleBinding: roleBinding, + ClusterRole: clusterRole, + ClusterRoleBinding: clusterRoleBinding, + PrincipalDeployment: principalDeployment, + PrincipalRoute: principalRoute, + SecretNames: 
secretNames, + ServiceNames: serviceNames, + DeploymentNames: deploymentNames, + ExpectRoute: expectRoutePtr, + }) + } + + verifyResourcesDeleted := func() { + agentFixture.VerifyResourcesDeleted(principalResources) + } + + It("should create argocd agent principal resources, but pod should fail to start as image does not exist", func() { + // Change log level to trace and custom image name + argoCD.Spec.ArgoCDAgent.Principal.LogLevel = "trace" + argoCD.Spec.ArgoCDAgent.Principal.Image = "quay.io/user/argocd-agent:v1" + + By("Create ArgoCD instance") + + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for principal pod") + + verifyExpectedResourcesExist(ns) + + By("Verify principal has the custom image we specified in ArgoCD CR") + + container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgentPrincipal, *principalDeployment) + Expect(container).ToNot(BeNil()) + Expect(container.Image).To(Equal("quay.io/user/argocd-agent:v1")) + + By("Verify environment variables are set correctly") + + // update expected value in default environment variables according to ArgoCD CR in the test + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalLogLevel] = "trace" + + for key, value := range expectedEnvVariables { + Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: key, Value: value}), "Environment variable %s should be set to %s", key, value) + } + + By("Disable principal") + + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + + argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { + ac.Spec.ArgoCDAgent.Principal.Enabled = ptr.To(false) + }) + + By("Verify principal resources are deleted") + + verifyResourcesDeleted() + }) + + It("should create argocd agent principal resources, and pod should start successfully with default image", func() { + + // Add a custom environment variable to the principal server + 
argoCD.Spec.ArgoCDAgent.Principal.Env = []corev1.EnvVar{{Name: "TEST_ENV", Value: "test_value"}} + + By("Create ArgoCD instance") + + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for principal pod") + + verifyExpectedResourcesExist(ns) + + By("Verify principal uses the default agent image") + + container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgentPrincipal, *principalDeployment) + Expect(container).ToNot(BeNil()) + Expect(container.Image).To(Equal(ArgoCDAgentPrincipalDefaultImageName)) + + By("Create required secrets and certificates for principal pod to start properly") + + createRequiredSecrets(ns) + + By("Verify principal pod starts successfully by checking logs") + + agentFixture.VerifyLogs(deploymentNameAgentPrincipal, ns.Name, []string{ + "Starting metrics server", + "Redis proxy started", + "Application informer synced and ready", + "AppProject informer synced and ready", + "Resource proxy started", + "Namespace informer synced and ready", + "Starting healthz server", + }) + + By("verify that deployment is in Ready state") + + Eventually(principalDeployment, "120s", "5s").Should(deploymentFixture.HaveReadyReplicas(1), "Principal deployment should become ready") + + By("Verify environment variables are set correctly") + + for key, value := range expectedEnvVariables { + Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: key, Value: value}), "Environment variable %s should be set to %s", key, value) + } + + Expect(container.Env).To(ContainElement(And( + HaveField("Name", argocdagent.EnvRedisPassword), + HaveField("ValueFrom.SecretKeyRef", Not(BeNil())), + )), "REDIS_PASSWORD should be set with valueFrom.secretKeyRef") + + By("Disable principal") + + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + + argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { + 
ac.Spec.ArgoCDAgent.Principal.Enabled = nil + }) + + By("Verify principal resources are deleted") + + verifyResourcesDeleted() + }) + + It("Should reflect configuration changes from ArgoCD CR to the principal deployment", func() { + + By("Create ArgoCD instance") + + argoCD.Spec.ArgoCDAgent.Principal.Image = "quay.io/argoprojlabs/argocd-agent:v0.5.0" + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for principal pod") + + verifyExpectedResourcesExist(ns) + + By("Verify principal has the custom image we specified in ArgoCD CR") + + container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgentPrincipal, *principalDeployment) + Expect(container).ToNot(BeNil()) + Expect(container.Image).To(Equal("quay.io/argoprojlabs/argocd-agent:v0.5.0")) + + By("Verify environment variables are set correctly") + + // update expected value in default environment variables according to ArgoCD CR in the test + for key, value := range expectedEnvVariables { + Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: key, Value: value}), "Environment variable %s should be set to %s", key, value) + } + + By("Update ArgoCD CR with new configuration") + + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + + argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { + + ac.Spec.ArgoCDAgent.Principal.LogLevel = "trace" + ac.Spec.ArgoCDAgent.Principal.LogFormat = "json" + ac.Spec.ArgoCDAgent.Principal.Server.KeepAliveMinInterval = "60s" + ac.Spec.ArgoCDAgent.Principal.Server.EnableWebSocket = ptr.To(true) + ac.Spec.ArgoCDAgent.Principal.Image = "quay.io/argoprojlabs/argocd-agent:v0.5.1" + + ac.Spec.ArgoCDAgent.Principal.Namespace.AllowedNamespaces = []string{"agent-managed", "agent-autonomous"} + ac.Spec.ArgoCDAgent.Principal.Namespace.EnableNamespaceCreate = ptr.To(true) + ac.Spec.ArgoCDAgent.Principal.Namespace.NamespaceCreatePattern = 
"agent-.*" + ac.Spec.ArgoCDAgent.Principal.Namespace.NamespaceCreateLabels = []string{"environment=agent"} + + ac.Spec.ArgoCDAgent.Principal.TLS.InsecureGenerate = ptr.To(false) + ac.Spec.ArgoCDAgent.Principal.TLS.SecretName = "argocd-agent-principal-tls-v2" + ac.Spec.ArgoCDAgent.Principal.TLS.RootCASecretName = "argocd-agent-ca-v2" + + ac.Spec.ArgoCDAgent.Principal.JWT.InsecureGenerate = ptr.To(false) + ac.Spec.ArgoCDAgent.Principal.JWT.SecretName = "argocd-agent-jwt-v2" + + ac.Spec.ArgoCDAgent.Principal.ResourceProxy = &argov1beta1api.PrincipalResourceProxySpec{ + SecretName: "argocd-agent-resource-proxy-tls-v2", + CASecretName: "argocd-agent-ca-v2", + } + + }) + + By("Create required secrets and certificates for principal pod to start properly") + + // Update secret names according to ArgoCD CR + secretNames = agentFixture.AgentSecretNames{ + JWTSecretName: "argocd-agent-jwt-v2", + PrincipalTLSSecretName: "argocd-agent-principal-tls-v2", + RootCASecretName: "argocd-agent-ca-v2", + ResourceProxyTLSSecretName: "argocd-agent-resource-proxy-tls-v2", + } + createRequiredSecrets(ns) + + By("Verify principal has the updated image we specified in ArgoCD CR") + + Eventually(principalDeployment).Should(k8sFixture.ExistByName()) + Eventually( + func() bool { + // Fetch the latest deployment from the cluster + err := k8sClient.Get(ctx, client.ObjectKey{Name: deploymentNameAgentPrincipal, Namespace: ns.Name}, principalDeployment) + if err != nil { + GinkgoWriter.Println("Error getting deployment for image check: ", err) + return false + } + container = deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgentPrincipal, *principalDeployment) + if container == nil { + return false + } + return container.Image == "quay.io/argoprojlabs/argocd-agent:v0.5.1" + }, "120s", "5s").Should(BeTrue(), "Principal deployment should have the updated image") + + By("verify that deployment is in Ready state") + + Eventually(principalDeployment, "120s", 
"5s").Should(deploymentFixture.HaveReadyReplicas(1), "Principal deployment should become ready") + + By("Verify environment variables are updated correctly") + + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalLogLevel] = "trace" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalLogFormat] = "json" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalKeepAliveMinInterval] = "60s" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalEnableWebSocket] = "true" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalAllowedNamespaces] = "agent-managed,agent-autonomous" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalNamespaceCreateEnable] = "true" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalNamespaceCreatePattern] = "agent-.*" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalNamespaceCreateLabels] = "environment=agent" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalTLSServerAllowGenerate] = "false" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalJWTAllowGenerate] = "false" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalResourceProxySecretName] = "argocd-agent-resource-proxy-tls-v2" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalResourceProxyCaSecretName] = "argocd-agent-ca-v2" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalTLSSecretName] = "argocd-agent-principal-tls-v2" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalTLSServerRootCASecretName] = "argocd-agent-ca-v2" + expectedEnvVariables[argocdagent.EnvArgoCDPrincipalJwtSecretName] = "argocd-agent-jwt-v2" + + for key, value := range expectedEnvVariables { + Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: key, Value: value}), "Environment variable %s should be set to %s", key, value) + } + }) + + It("should handle route disabled configuration correctly", func() { + + By("Create ArgoCD instance with route disabled") + + argoCD.Spec.ArgoCDAgent.Principal.Server.Route = argov1beta1api.ArgoCDAgentPrincipalRouteSpec{ + Enabled: ptr.To(false), + } + 
Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for principal pod") + + verifyExpectedResourcesExist(ns, false) + + By("Verify Route for principal does not exist") + + if fixture.RunningOnOpenShift() { + Consistently(principalRoute, "10s", "1s").Should(k8sFixture.NotExistByName()) + } + }) + + It("should handle route enabled configuration correctly", func() { + + By("Create ArgoCD instance with route enabled") + + argoCD.Spec.ArgoCDAgent.Principal.Server.Route = argov1beta1api.ArgoCDAgentPrincipalRouteSpec{ + Enabled: ptr.To(true), + } + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for principal pod") + + verifyExpectedResourcesExist(ns) + + By("Verify Route for principal exists") + + if fixture.RunningOnOpenShift() { + Eventually(principalRoute).Should(k8sFixture.ExistByName()) + } + }) + + It("should handle route toggle from enabled to disabled correctly", func() { + + By("Create ArgoCD instance with route enabled") + + argoCD.Spec.ArgoCDAgent.Principal.Server.Route = argov1beta1api.ArgoCDAgentPrincipalRouteSpec{ + Enabled: ptr.To(true), + } + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for principal pod") + + verifyExpectedResourcesExist(ns) + + By("Verify Route for principal exists") + + if fixture.RunningOnOpenShift() { + Eventually(principalRoute).Should(k8sFixture.ExistByName()) + } + + By("Disable route while keeping principal enabled") + + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { + ac.Spec.ArgoCDAgent.Principal.Server.Route.Enabled = ptr.To(false) + }) + + By("Verify Route for principal is deleted") + + if fixture.RunningOnOpenShift() { + Eventually(principalRoute).Should(k8sFixture.NotExistByName()) + } + + By("Verify other principal resources still 
exist") + + Eventually(principalDeployment).Should(k8sFixture.ExistByName()) + + for _, serviceName := range []string{ + fmt.Sprintf("%s-agent-principal", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-agent-principal-metrics", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-agent-principal-redisproxy", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-agent-principal-resource-proxy", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-agent-principal-healthz", argoCDAgentInstanceNamePrincipal), + } { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: ns.Name, + }, + } + Eventually(service, "30s", "2s").Should(k8sFixture.ExistByName(), "Service '%s' should exist in namespace '%s'", serviceName, ns.Name) + } + + By("Re-enable route") + + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { + ac.Spec.ArgoCDAgent.Principal.Server.Route.Enabled = ptr.To(true) + }) + + By("Verify Route for principal is recreated") + + if fixture.RunningOnOpenShift() { + Eventually(principalRoute).Should(k8sFixture.ExistByName()) + } + }) + + It("should handle service type ClusterIP configuration correctly", func() { + + By("Create ArgoCD instance with service type ClusterIP") + + argoCD.Spec.ArgoCDAgent.Principal.Server.Service = argov1beta1api.ArgoCDAgentPrincipalServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + } + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for principal pod") + + verifyExpectedResourcesExist(ns) + + By("Verify principal service has ClusterIP type") + + principalService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgentPrincipal, + Namespace: ns.Name, + }, + } + Eventually(principalService).Should(k8sFixture.ExistByName()) + 
Expect(principalService.Spec.Type).To(Equal(corev1.ServiceTypeClusterIP)) + }) + + It("should handle service type LoadBalancer configuration correctly", func() { + + By("Create ArgoCD instance with service type LoadBalancer") + + argoCD.Spec.ArgoCDAgent.Principal.Server.Service = argov1beta1api.ArgoCDAgentPrincipalServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + } + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for principal pod") + + verifyExpectedResourcesExist(ns) + + By("Verify principal service has LoadBalancer type") + + principalService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgentPrincipal, + Namespace: ns.Name, + }, + } + Eventually(principalService).Should(k8sFixture.ExistByName()) + Expect(principalService.Spec.Type).To(Equal(corev1.ServiceTypeLoadBalancer)) + }) + + It("should handle service type updates correctly", func() { + + By("Create ArgoCD instance with service type ClusterIP") + + argoCD.Spec.ArgoCDAgent.Principal.Server.Service = argov1beta1api.ArgoCDAgentPrincipalServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + } + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for principal pod") + + verifyExpectedResourcesExist(ns) + + By("Verify principal service has ClusterIP type initially") + + principalService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgentPrincipal, + Namespace: ns.Name, + }, + } + Eventually(principalService).Should(k8sFixture.ExistByName()) + Expect(principalService.Spec.Type).To(Equal(corev1.ServiceTypeClusterIP)) + + By("Update service type to LoadBalancer") + + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { + ac.Spec.ArgoCDAgent.Principal.Server.Service.Type = corev1.ServiceTypeLoadBalancer + }) + + By("Verify 
principal service type is updated to LoadBalancer") + + Eventually(func() corev1.ServiceType { + err := k8sClient.Get(ctx, client.ObjectKey{Name: deploymentNameAgentPrincipal, Namespace: ns.Name}, principalService) + if err != nil { + return "" + } + return principalService.Spec.Type + }, "30s", "2s").Should(Equal(corev1.ServiceTypeLoadBalancer)) + }) + }) +}) diff --git a/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go b/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go new file mode 100644 index 000000000..743cb4e89 --- /dev/null +++ b/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go @@ -0,0 +1,448 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sequential + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + argov1beta1api "github.com/argoproj-labs/argocd-operator/api/v1beta1" + "github.com/argoproj-labs/argocd-operator/controllers/argocdagent/agent" + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture" + argocdFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/argocd" + deploymentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/deployment" + k8sFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/k8s" + fixtureUtils "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" +) + +var _ = Describe("GitOps Operator Sequential E2E Tests", func() { + Context("1-052_validate_argocd_agent_agent", func() { + + var ( + k8sClient client.Client + ctx context.Context + argoCD *argov1beta1api.ArgoCD + ns *corev1.Namespace + cleanupFunc func() + serviceAccount *corev1.ServiceAccount + role *rbacv1.Role + roleBinding *rbacv1.RoleBinding + clusterRole *rbacv1.ClusterRole + clusterRoleBinding *rbacv1.ClusterRoleBinding + serviceNames []string + deploymentNames []string + agentDeployment *appsv1.Deployment + expectedEnvVariables map[string]string + secretNames []string + ) + + BeforeEach(func() { + fixture.EnsureSequentialCleanSlate() + fixture.SetEnvInOperatorSubscriptionOrDeployment("ARGOCD_CLUSTER_CONFIG_NAMESPACES", "openshift-gitops, argocd-agent-agent-1-052") + + k8sClient, _ = fixtureUtils.GetE2ETestKubeClient() + ctx = context.Background() + ns, cleanupFunc = fixture.CreateNamespaceWithCleanupFunc("argocd-agent-agent-1-052") + + // Define ArgoCD CR with agent enabled + argoCD = &argov1beta1api.ArgoCD{ + ObjectMeta: metav1.ObjectMeta{ + Name: argoCDAgentInstanceNameAgent, + Namespace: 
ns.Name, + }, + Spec: argov1beta1api.ArgoCDSpec{ + Controller: argov1beta1api.ArgoCDApplicationControllerSpec{ + Enabled: ptr.To(false), + }, + Server: argov1beta1api.ArgoCDServerSpec{ + Enabled: ptr.To(false), + }, + ArgoCDAgent: &argov1beta1api.ArgoCDAgentSpec{ + Agent: &argov1beta1api.AgentSpec{ + Enabled: ptr.To(true), + Creds: "mtls:any", + LogLevel: "info", + LogFormat: "text", + Client: &argov1beta1api.AgentClientSpec{ + PrincipalServerAddress: "argocd-agent-principal.example.com", + PrincipalServerPort: "443", + Mode: string(argov1beta1api.AgentModeManaged), + EnableWebSocket: ptr.To(false), + EnableCompression: ptr.To(false), + KeepAliveInterval: "30s", + }, + TLS: &argov1beta1api.AgentTLSSpec{ + SecretName: agentClientTLSSecretName, + RootCASecretName: agentRootCASecretName, + Insecure: ptr.To(false), + }, + Redis: &argov1beta1api.AgentRedisSpec{ + ServerAddress: fmt.Sprintf("%s-%s:%d", argoCDAgentInstanceNameAgent, "redis", ArgoCDDefaultRedisPort), + }, + }, + }, + }, + } + + // Define required resources for agent pod + serviceAccount = &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgent, + Namespace: ns.Name, + }, + } + + role = &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgent, + Namespace: ns.Name, + }, + } + + roleBinding = &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgent, + Namespace: ns.Name, + }, + } + + clusterRole = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-agent", argoCDAgentInstanceNameAgent, ns.Name), + }, + } + + clusterRoleBinding = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-agent", argoCDAgentInstanceNameAgent, ns.Name), + }, + } + + // List required secrets for agent pod + secretNames = []string{ + agentClientTLSSecretName, + agentRootCASecretName, + fmt.Sprintf("%s-redis-initial-password", argoCDAgentInstanceNameAgent), + } + + serviceNames = []string{ + 
fmt.Sprintf("%s-agent-agent-metrics", argoCDAgentInstanceNameAgent), + fmt.Sprintf("%s-agent-agent-healthz", argoCDAgentInstanceNameAgent), + fmt.Sprintf("%s-redis", argoCDAgentInstanceNameAgent), + } + deploymentNames = []string{fmt.Sprintf("%s-redis", argoCDAgentInstanceNameAgent)} + + agentDeployment = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgent, + Namespace: ns.Name, + }, + } + + // List environment variables with expected values for the agent deployment + expectedEnvVariables = map[string]string{ + agent.EnvArgoCDAgentLogLevel: "info", + agent.EnvArgoCDAgentNamespace: ns.Name, + agent.EnvArgoCDAgentServerAddress: "argocd-agent-principal.example.com", + agent.EnvArgoCDAgentServerPort: "443", + agent.EnvArgoCDAgentLogFormat: "text", + agent.EnvArgoCDAgentTLSSecretName: agentClientTLSSecretName, + agent.EnvArgoCDAgentTLSInsecure: "false", + agent.EnvArgoCDAgentTLSRootCASecretName: agentRootCASecretName, + agent.EnvArgoCDAgentMode: string(argov1beta1api.AgentModeManaged), + agent.EnvArgoCDAgentCreds: "mtls:any", + agent.EnvArgoCDAgentEnableWebSocket: "false", + agent.EnvArgoCDAgentEnableCompression: "false", + agent.EnvArgoCDAgentKeepAliveInterval: "30s", + agent.EnvArgoCDAgentRedisAddress: fmt.Sprintf("%s-%s:%d", argoCDAgentInstanceNameAgent, "redis", ArgoCDDefaultRedisPort), + agent.EnvArgoCDAgentEnableResourceProxy: "true", + } + }) + + AfterEach(func() { + By("Cleanup cluster-scoped resources") + _ = k8sClient.Delete(ctx, clusterRole) + _ = k8sClient.Delete(ctx, clusterRoleBinding) + + By("Cleanup namespace") + if cleanupFunc != nil { + cleanupFunc() + } + }) + + // verifyExpectedResourcesExist will verify that the resources that are created for agent and ArgoCD are created. 
+ verifyExpectedResourcesExist := func(ns *corev1.Namespace) { + + By("verifying expected resources exist") + Eventually(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNames[2], Namespace: ns.Name, + }}, "60s", "2s").Should(k8sFixture.ExistByName()) + Eventually(serviceAccount).Should(k8sFixture.ExistByName()) + Eventually(role).Should(k8sFixture.ExistByName()) + Eventually(roleBinding).Should(k8sFixture.ExistByName()) + Eventually(clusterRole).Should(k8sFixture.ExistByName()) + defer func() { + _ = k8sClient.Delete(ctx, clusterRole) + }() + + Eventually(clusterRoleBinding).Should(k8sFixture.ExistByName()) + defer func() { + _ = k8sClient.Delete(ctx, clusterRoleBinding) + }() + + for _, serviceName := range serviceNames { + + By("verifying Service '" + serviceName + "' exists and is a LoadBalancer or ClusterIP depending on which service") + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: ns.Name, + }, + } + Eventually(service).Should(k8sFixture.ExistByName()) + Expect(string(service.Spec.Type)).To(Equal("ClusterIP")) + } + + for _, deploymentName := range deploymentNames { + + By("verifying Deployment '" + deploymentName + "' exists and is ready") + + depl := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentName, + Namespace: ns.Name, + }, + } + Eventually(depl).Should(k8sFixture.ExistByName()) + } + + By("verifying primary agent Deployment has expected values") + + Eventually(agentDeployment).Should(k8sFixture.ExistByName()) + Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/component", string(argov1beta1api.AgentComponentTypeAgent))) + Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/managed-by", argoCDAgentInstanceNameAgent)) + Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/name", deploymentNameAgent)) + 
Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/part-of", "argocd-agent")) + } + + // verifyResourcesDeleted will verify that the various resources that are created for agent are deleted. + verifyResourcesDeleted := func() { + + By("verifying resources are deleted for agent pod") + + Eventually(serviceAccount).Should(k8sFixture.NotExistByName()) + Eventually(role).Should(k8sFixture.NotExistByName()) + Eventually(roleBinding).Should(k8sFixture.NotExistByName()) + Eventually(clusterRole).Should(k8sFixture.NotExistByName()) + Eventually(clusterRoleBinding).Should(k8sFixture.NotExistByName()) + Eventually(agentDeployment).Should(k8sFixture.NotExistByName()) + + for _, serviceName := range []string{fmt.Sprintf("%s-agent-agent-metrics", argoCDAgentInstanceNameAgent), fmt.Sprintf("%s-agent-agent-healthz", argoCDAgentInstanceNameAgent)} { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: ns.Name, + }, + } + Eventually(service).Should(k8sFixture.NotExistByName()) + } + } + + It("should create argocd agent agent resources, but pod should not be expected to run successfully without principal", func() { + // Change log level to trace and custom image name + argoCD.Spec.ArgoCDAgent.Agent.LogLevel = "trace" + argoCD.Spec.ArgoCDAgent.Agent.Image = "quay.io/user/argocd-agent:v1" + + By("Create ArgoCD instance") + + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for agent pod") + + verifyExpectedResourcesExist(ns) + + By("Verify agent has the custom image we specified in ArgoCD CR") + + container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgent, *agentDeployment) + Expect(container).ToNot(BeNil()) + Expect(container.Image).To(Equal("quay.io/user/argocd-agent:v1")) + + By("Verify environment variables are set correctly") + + // update expected value in default environment variables according to ArgoCD CR in the test + 
expectedEnvVariables[agent.EnvArgoCDAgentLogLevel] = "trace" + + for key, value := range expectedEnvVariables { + Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: key, Value: value}), "Environment variable %s should be set to %s", key, value) + } + + By("Disable agent") + + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNameAgent, Namespace: ns.Name}, argoCD)).To(Succeed()) + + argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { + ac.Spec.ArgoCDAgent.Agent.Enabled = ptr.To(false) + }) + + By("Verify agent resources are deleted") + + verifyResourcesDeleted() + }) + + It("should create argocd agent agent resources with default image, but pod will not start without principal", func() { + + // Add a custom environment variable to the agent client + argoCD.Spec.ArgoCDAgent.Agent.Env = []corev1.EnvVar{{Name: "TEST_ENV", Value: "test_value"}} + + By("Create ArgoCD instance") + + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for agent pod") + + verifyExpectedResourcesExist(ns) + + By("Verify agent uses the default agent image") + + container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgent, *agentDeployment) + Expect(container).ToNot(BeNil()) + Expect(container.Image).To(Equal(ArgoCDAgentAgentDefaultImageName)) + + By("Verify environment variables are set correctly") + + for key, value := range expectedEnvVariables { + Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: key, Value: value}), "Environment variable %s should be set to %s", key, value) + } + + Expect(container.Env).To(ContainElement(And( + HaveField("Name", agent.EnvRedisPassword), + HaveField("ValueFrom.SecretKeyRef", Not(BeNil())), + )), "REDIS_PASSWORD should be set with valueFrom.secretKeyRef") + + By("Verify custom environment variable is present") + + Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: "TEST_ENV", Value: "test_value"}), "Custom environment variable 
TEST_ENV should be set") + + By("Disable agent") + + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNameAgent, Namespace: ns.Name}, argoCD)).To(Succeed()) + + argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { + ac.Spec.ArgoCDAgent.Agent.Enabled = nil + }) + + By("Verify agent resources are deleted") + + verifyResourcesDeleted() + }) + + It("Should reflect configuration changes from ArgoCD CR to the agent deployment", func() { + + By("Create ArgoCD instance") + + argoCD.Spec.ArgoCDAgent.Agent.Image = "quay.io/argoprojlabs/argocd-agent:v0.5.0" + Expect(k8sClient.Create(ctx, argoCD)).To(Succeed()) + + By("Verify expected resources are created for agent pod") + + verifyExpectedResourcesExist(ns) + + By("Verify agent has the custom image we specified in ArgoCD CR") + + container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgent, *agentDeployment) + Expect(container).ToNot(BeNil()) + Expect(container.Image).To(Equal("quay.io/argoprojlabs/argocd-agent:v0.5.0")) + + By("Verify environment variables are set correctly") + + // update expected value in default environment variables according to ArgoCD CR in the test + for key, value := range expectedEnvVariables { + Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: key, Value: value}), "Environment variable %s should be set to %s", key, value) + } + + By("Update ArgoCD CR with new configuration") + + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNameAgent, Namespace: ns.Name}, argoCD)).To(Succeed()) + + argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { + + ac.Spec.ArgoCDAgent.Agent.LogLevel = "trace" + ac.Spec.ArgoCDAgent.Agent.LogFormat = "json" + ac.Spec.ArgoCDAgent.Agent.Image = "quay.io/argoprojlabs/argocd-agent:v0.5.1" + + ac.Spec.ArgoCDAgent.Agent.Client.KeepAliveInterval = "60s" + ac.Spec.ArgoCDAgent.Agent.Client.EnableWebSocket = ptr.To(true) + ac.Spec.ArgoCDAgent.Agent.Client.EnableCompression = ptr.To(true) + 
ac.Spec.ArgoCDAgent.Agent.Client.Mode = string(argov1beta1api.AgentModeAutonomous) + ac.Spec.ArgoCDAgent.Agent.Client.PrincipalServerAddress = "argocd-agent-principal-updated.example.com" + ac.Spec.ArgoCDAgent.Agent.Client.PrincipalServerPort = "8443" + + ac.Spec.ArgoCDAgent.Agent.TLS.Insecure = ptr.To(true) + ac.Spec.ArgoCDAgent.Agent.TLS.SecretName = "argocd-agent-client-tls-v2" + ac.Spec.ArgoCDAgent.Agent.TLS.RootCASecretName = "argocd-agent-ca-v2" + + }) + + By("Verify agent has the updated image we specified in ArgoCD CR") + + Eventually(agentDeployment).Should(k8sFixture.ExistByName()) + Eventually( + func() bool { + // Fetch the latest deployment from the cluster + err := k8sClient.Get(ctx, client.ObjectKey{Name: deploymentNameAgent, Namespace: ns.Name}, agentDeployment) + if err != nil { + GinkgoWriter.Println("Error getting deployment for image check: ", err) + return false + } + container = deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgent, *agentDeployment) + if container == nil { + return false + } + return container.Image == "quay.io/argoprojlabs/argocd-agent:v0.5.1" + }, "120s", "5s").Should(BeTrue(), "Agent deployment should have the updated image") + + By("Verify environment variables are updated correctly") + + expectedEnvVariables[agent.EnvArgoCDAgentLogLevel] = "trace" + expectedEnvVariables[agent.EnvArgoCDAgentLogFormat] = "json" + expectedEnvVariables[agent.EnvArgoCDAgentKeepAliveInterval] = "60s" + expectedEnvVariables[agent.EnvArgoCDAgentEnableWebSocket] = "true" + expectedEnvVariables[agent.EnvArgoCDAgentEnableCompression] = "true" + expectedEnvVariables[agent.EnvArgoCDAgentMode] = string(argov1beta1api.AgentModeAutonomous) + expectedEnvVariables[agent.EnvArgoCDAgentServerAddress] = "argocd-agent-principal-updated.example.com" + expectedEnvVariables[agent.EnvArgoCDAgentServerPort] = "8443" + expectedEnvVariables[agent.EnvArgoCDAgentTLSInsecure] = "true" + expectedEnvVariables[agent.EnvArgoCDAgentTLSSecretName] = 
"argocd-agent-client-tls-v2" + expectedEnvVariables[agent.EnvArgoCDAgentTLSRootCASecretName] = "argocd-agent-ca-v2" + + for key, value := range expectedEnvVariables { + Expect(container.Env).To(ContainElement(corev1.EnvVar{Name: key, Value: value}), "Environment variable %s should be set to %s", key, value) + } + }) + }) +}) diff --git a/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go b/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go new file mode 100644 index 000000000..216b09c86 --- /dev/null +++ b/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go @@ -0,0 +1,624 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sequential + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture" + appFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/application" + deploymentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/deployment" + fixtureUtils "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + argocdv1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1" + "github.com/argoproj/gitops-engine/pkg/health" + + argov1beta1api "github.com/argoproj-labs/argocd-operator/api/v1beta1" + "github.com/argoproj-labs/argocd-operator/common" + + agentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/agent" +) + +/* +### Namespace Hierarchy for this test: + +Test Cluster (Has a Hub and two Spokes (Managed and Autonomous) simulated) +│ +├─ 🏛️ Hub Cluster +│ ├─ Namespace: ns-hosting-principal +│ │ ├─ ArgoCD: argocd-hub (Principal enabled) +│ │ ├─ Deployment: argocd-hub-agent-principal +│ │ ├─ Service: argocd-hub-agent-principal (Type LoadBalancer) +│ │ ├─ Secrets: TLS, JWT, CA, Cluster registration secrets +│ │ └─ AppProject: agent-app-project ("Source of truth" for managed agent, delivered to agent by principal) +│ │ +│ ├─ Namespace: managed-cluster-in-hub (Logical representation of managed cluster in hub) +│ │ └─ Application: app-managed ("Source of truth" for managed agent, delivered to agent by principal) +│ │ +│ │ +│ └─ Namespace: autonomous-cluster-in-hub (Logical representation of autonomous cluster in hub) +| └─ Application: app-autonomous ("Source of truth" is autonomous agent, delivered to principal by agent) +│ +├─ 🔵 Managed Spoke Cluster +│ ├─ 
Namespace: ns-hosting-managed-agent +│ │ ├─ ArgoCD: argocd-spoke (Agent enabled, Managed mode) +│ │ ├─ Deployment: argocd-spoke-agent-agent +│ │ ├─ Secrets: Client TLS, CA +| | └─ Application: app-managed ("Source of truth" is principal, but Reconciled and deployed in spoke by agent) +│ │ +│ └─ Namespace: ns-hosting-app-in-managed-cluster +│ └─ Pod/Service/Route: spring-petclinic (Application resources deployed by agent in spoke) +│ +└─ 🔵 Autonomous Spoke Cluster + ├─ Namespace: ns-hosting-autonomous-agent + │ ├─ ArgoCD: argocd-spoke (Agent enabled, Autonomous mode) + │ ├─ Deployment: argocd-spoke-agent-agent + │ ├─ Secrets: Client TLS, CA + │ ├─ AppProject: agent-app-project ("Source of truth" is autonomous agent, delivered to principal by agent) + │ └─ Application: app-autonomous ("Source of truth" is autonomous agent, delivered to principal by agent, Reconciled and deployed in spoke by agent) + │ + └─ Namespace: ns-hosting-app-in-autonomous-cluster + └─ Pod/Service/Route: spring-petclinic (Application resources deployed by agent in spoke) +*/ + +const ( + // ArgoCD instance names + argoCDAgentInstanceNamePrincipal = "argocd-hub" + argoCDAgentInstanceNameAgent = "argocd-spoke" + + // Agent and Principal deployment names + deploymentNameAgentPrincipal = "argocd-hub-agent-principal" + deploymentNameAgent = "argocd-spoke-agent-agent" + + // Names given to clusters in hub + managedAgentClusterName = "managed-cluster-in-hub" + autonomousAgentClusterName = "autonomous-cluster-in-hub" + + // Application names + applicationNameManagedAgent = "app-managed" + applicationNameAutonomousAgent = "app-autonomous" + + // AppProject names + agentAppProjectName = "agent-app-project" + + // Namespaces hosting the principal and agent deployments + namespaceAgentPrincipal = "ns-hosting-principal" + namespaceManagedAgent = "ns-hosting-managed-agent" + namespaceAutonomousAgent = "ns-hosting-autonomous-agent" + + // Namespaces hosting application resources in managed and autonomous 
clusters + managedAgentApplicationNamespace = "ns-hosting-app-in-managed-cluster" + autonomousAgentApplicationNamespace = "ns-hosting-app-in-autonomous-cluster" + + // Secret names + agentJWTSecretName = "argocd-agent-jwt" + agentPrincipalTLSSecretName = "argocd-agent-principal-tls" + agentRootCASecretName = "argocd-agent-ca" + agentClientTLSSecretName = "argocd-agent-client-tls" + agentResourceProxyTLSSecretName = "argocd-agent-resource-proxy-tls" + + // Redis port + ArgoCDDefaultRedisPort = 6379 + + // ArgoCDAgentAgentDefaultImageName is the default image name for the ArgoCD agent's agent component. + ArgoCDAgentAgentDefaultImageName = "quay.io/argoprojlabs/argocd-agent:v0.5.2" + + // ArgoCDAgentPrincipalDefaultImageName is the default image name for the ArgoCD agent's principal component. + ArgoCDAgentPrincipalDefaultImageName = "quay.io/argoprojlabs/argocd-agent:v0.5.2" +) + +var ( + principalStartupLogs = []string{ + "Starting metrics server", + "Redis proxy started", + "Application informer synced and ready", + "AppProject informer synced and ready", + "Resource proxy started", + "Namespace informer synced and ready", + "Starting healthz server", + } + + agentStartupLogs = []string{ + "Starting metrics server", + "Starting healthz server", + "Authentication successful", + "Connected to argocd-agent", + "Starting event writer", + "Starting to send events to event stream", + "Starting to receive events from event stream", + } +) + +var _ = Describe("GitOps Operator Sequential E2E Tests", func() { + Context("1-053_validate_argocd_agent_principal_connected_test", func() { + var ( + k8sClient client.Client + ctx context.Context + cleanupFuncs []func() + registerCleanup func(func()) + clusterRolePrincipal *rbacv1.ClusterRole + clusterRoleBindingPrincipal *rbacv1.ClusterRoleBinding + clusterRoleManagedAgent *rbacv1.ClusterRole + clusterRoleBindingManagedAgent *rbacv1.ClusterRoleBinding + clusterRoleAutonomousAgent *rbacv1.ClusterRole + 
clusterRoleBindingAutonomousAgent *rbacv1.ClusterRoleBinding + ) + + BeforeEach(func() { + fixture.EnsureSequentialCleanSlate() + fixture.SetEnvInOperatorSubscriptionOrDeployment("ARGOCD_CLUSTER_CONFIG_NAMESPACES", "openshift-gitops, ns-hosting-principal, ns-hosting-managed-agent, ns-hosting-autonomous-agent") + + k8sClient, _ = fixtureUtils.GetE2ETestKubeClient() + ctx = context.Background() + cleanupFuncs = nil + registerCleanup = func(fn func()) { + if fn != nil { + cleanupFuncs = append(cleanupFuncs, fn) + } + } + + clusterRolePrincipal = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-principal", argoCDAgentInstanceNamePrincipal, namespaceAgentPrincipal), + }, + } + clusterRoleBindingPrincipal = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-principal", argoCDAgentInstanceNamePrincipal, namespaceAgentPrincipal), + }, + } + + clusterRoleManagedAgent = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-agent", argoCDAgentInstanceNameAgent, namespaceManagedAgent), + }, + } + clusterRoleBindingManagedAgent = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-agent", argoCDAgentInstanceNameAgent, namespaceManagedAgent), + }, + } + + clusterRoleAutonomousAgent = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-agent", argoCDAgentInstanceNameAgent, namespaceAutonomousAgent), + }, + } + clusterRoleBindingAutonomousAgent = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-agent-agent", argoCDAgentInstanceNameAgent, namespaceAutonomousAgent), + }, + } + + // Create required namespaces and cleanup functions + _, cleanupFuncClusterManaged := fixture.CreateNamespaceWithCleanupFunc(managedAgentClusterName) + registerCleanup(cleanupFuncClusterManaged) + + _, cleanupFuncClusterAutonomous := 
fixture.CreateNamespaceWithCleanupFunc(autonomousAgentClusterName) + registerCleanup(cleanupFuncClusterAutonomous) + + // Create namespaces with managed-by label for the agent's application controller to deploy resources + _, cleanupFuncManagedApplication := fixture.CreateManagedNamespaceWithCleanupFunc(managedAgentApplicationNamespace, namespaceManagedAgent) + registerCleanup(cleanupFuncManagedApplication) + + _, cleanupFuncAutonomousApplication := fixture.CreateManagedNamespaceWithCleanupFunc(autonomousAgentApplicationNamespace, namespaceAutonomousAgent) + registerCleanup(cleanupFuncAutonomousApplication) + }) + + // This function checks principal logs to verify it has connected to both agents. + validatePrincipalAndAgentConnection := func() { + By("Verify principal is connected to the both agents") + + agentFixture.VerifyLogs(deploymentNameAgentPrincipal, namespaceAgentPrincipal, []string{ + fmt.Sprintf("Mapped cluster %s to agent %s", managedAgentClusterName, managedAgentClusterName), + fmt.Sprintf("Mapped cluster %s to agent %s", autonomousAgentClusterName, autonomousAgentClusterName), + fmt.Sprintf("Updated connection status to 'Successful' in Cluster: '%s' mapped with Agent: '%s'", managedAgentClusterName, managedAgentClusterName), + fmt.Sprintf("Updated connection status to 'Successful' in Cluster: '%s' mapped with Agent: '%s'", autonomousAgentClusterName, autonomousAgentClusterName), + "Processing clusterCacheInfoUpdate event", + "Updated cluster cache stats in cluster.", + }) + } + + // This function deploys an application and verifies it is healthy and synced. 
+ deployAndValidateApplication := func(application *argocdv1alpha1.Application) { + + By("Deploy application: " + application.Name + " in namespace: " + application.Namespace) + Expect(k8sClient.Create(ctx, application)).To(Succeed()) + + By("Verify application: " + application.Name + " in namespace: " + application.Namespace + " is healthy and synced") + Eventually(application, "180s", "5s").Should(appFixture.HaveSyncStatusCode(argocdv1alpha1.SyncStatusCodeSynced), "Application should be synced") + Eventually(application, "180s", "5s").Should(appFixture.HaveHealthStatusCode(health.HealthStatusHealthy), "Application should be healthy") + } + + // This test verifies that: + // 1. A cluster-scoped ArgoCD instance with principal component enabled and a cluster-scoped ArgoCD instance + // with agent component enabled are deployed in both "managed" and "autonomous" modes. + // 2. Each agent successfully connects to the principal. + // 3. Applications can be deployed in both modes, and are verified to be healthy and in sync. + // This validates the core connectivity and basic workflow of agent-principal architecture, including RBAC, connection, and application propagation. 
+ It("Should deploy ArgoCD principal and agent instances in both modes and verify they are working as expected", func() { + + By("Deploy principal and verify it starts successfully") + deployPrincipal(ctx, k8sClient, registerCleanup) + + By("Deploy managed agent and verify it starts successfully") + deployAgent(ctx, k8sClient, registerCleanup, argov1beta1api.AgentModeManaged) + + By("Deploy autonomous agent and verify it starts successfully") + deployAgent(ctx, k8sClient, registerCleanup, argov1beta1api.AgentModeAutonomous) + + By("Validate both agents are connected to the principal") + validatePrincipalAndAgentConnection() + + By("Create AppProject for managed agent in " + namespaceAgentPrincipal) + Expect(k8sClient.Create(ctx, buildAppProjectResource(namespaceAgentPrincipal, argov1beta1api.AgentModeManaged))).To(Succeed()) + + By("Create AppProject for autonomous agent in " + namespaceAutonomousAgent) + Expect(k8sClient.Create(ctx, buildAppProjectResource(namespaceAutonomousAgent, argov1beta1api.AgentModeAutonomous))).To(Succeed()) + + By("Deploy application for managed mode") + deployAndValidateApplication(buildApplicationResource(applicationNameManagedAgent, + managedAgentClusterName, managedAgentClusterName, argoCDAgentInstanceNameAgent, argov1beta1api.AgentModeManaged)) + + By("Deploy application for autonomous mode") + deployAndValidateApplication(buildApplicationResource(applicationNameAutonomousAgent, + namespaceAutonomousAgent, autonomousAgentClusterName, argoCDAgentInstanceNameAgent, argov1beta1api.AgentModeAutonomous)) + }) + + AfterEach(func() { + By("Cleanup cluster-scoped resources") + _ = k8sClient.Delete(ctx, clusterRolePrincipal) + _ = k8sClient.Delete(ctx, clusterRoleBindingPrincipal) + + _ = k8sClient.Delete(ctx, clusterRoleManagedAgent) + _ = k8sClient.Delete(ctx, clusterRoleBindingManagedAgent) + + _ = k8sClient.Delete(ctx, clusterRoleAutonomousAgent) + _ = k8sClient.Delete(ctx, clusterRoleBindingAutonomousAgent) + + By("Cleanup namespaces 
created in this test") + for i := len(cleanupFuncs) - 1; i >= 0; i-- { + cleanupFuncs[i]() + } + }) + + }) +}) + +// This function deploys the principal ArgoCD instance and waits for it to be ready. +// It creates the required secrets for the principal and verifies that the principal deployment is in Ready state. +// It also verifies that the principal logs contain the expected messages. +func deployPrincipal(ctx context.Context, k8sClient client.Client, registerCleanup func(func())) { + GinkgoHelper() + + nsPrincipal, cleanup := fixture.CreateNamespaceWithCleanupFunc(namespaceAgentPrincipal) + registerCleanup(cleanup) + + By("Create ArgoCD instance with principal component enabled") + + argoCDInstance := buildArgoCDResource(argoCDAgentInstanceNamePrincipal, argov1beta1api.AgentComponentTypePrincipal, "", nsPrincipal) + waitForLoadBalancer := true + if !fixture.RunningOnOpenShift() { + argoCDInstance.Spec.ArgoCDAgent.Principal.Server.Service.Type = corev1.ServiceTypeClusterIP + waitForLoadBalancer = false + } + + Expect(k8sClient.Create(ctx, argoCDInstance)).To(Succeed()) + + By("Wait for principal service to be ready and use LoadBalancer hostname/IP when available") + + additionalSANs := []string{} + if waitForLoadBalancer { + principalService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgentPrincipal, + Namespace: nsPrincipal.Name, + }, + } + + err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, true, func(ctx context.Context) (bool, error) { + if pollErr := k8sClient.Get(ctx, client.ObjectKeyFromObject(principalService), principalService); pollErr != nil { + return false, nil + } + + for _, ingress := range principalService.Status.LoadBalancer.Ingress { + switch { + case ingress.Hostname != "": + additionalSANs = append(additionalSANs, ingress.Hostname) + return true, nil + case ingress.IP != "": + additionalSANs = append(additionalSANs, ingress.IP) + return true, nil + } + } + return false, nil + }) + if err != 
nil { + GinkgoWriter.Println("LoadBalancer ingress not available, proceeding without external SANs:", err) + } + } else { + GinkgoWriter.Println("Cluster does not support LoadBalancer services; using in-cluster service DNS SANs only") + } + + By("Create required secrets for principal") + + agentFixture.CreateRequiredSecrets(agentFixture.PrincipalSecretsConfig{ + PrincipalNamespaceName: namespaceAgentPrincipal, + PrincipalServiceName: deploymentNameAgentPrincipal, + ResourceProxyServiceName: fmt.Sprintf("%s-agent-principal-resource-proxy", argoCDAgentInstanceNamePrincipal), + JWTSecretName: agentJWTSecretName, + PrincipalTLSSecretName: agentPrincipalTLSSecretName, + RootCASecretName: agentRootCASecretName, + ResourceProxyTLSSecretName: agentResourceProxyTLSSecretName, + AdditionalPrincipalSANs: additionalSANs, + }) + + By("Verify that principal deployment is in Ready state") + + Eventually(&appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ + Name: deploymentNameAgentPrincipal, + Namespace: nsPrincipal.Name}}, "120s", "5s").Should(deploymentFixture.HaveReadyReplicas(1)) + + By("Verify principal logs contain expected messages") + + agentFixture.VerifyLogs(deploymentNameAgentPrincipal, nsPrincipal.Name, principalStartupLogs) +} + +// This function deploys the agent ArgoCD instance and waits for it to be ready. +// It creates the required secrets for the agent and verifies that the agent deployment is in Ready state. +// It also verifies that the agent logs contain the expected messages. 
// deployAgent creates the namespace, secrets, and ArgoCD CR for a spoke agent
// (managed or autonomous mode), then waits for the agent deployment to become
// Ready and checks its logs for the expected startup messages.
func deployAgent(ctx context.Context, k8sClient client.Client, registerCleanup func(func()), agentMode argov1beta1api.AgentMode) {
	GinkgoHelper()

	var (
		nsAgent   *corev1.Namespace
		agentName string
	)

	// The hosting namespace and the logical cluster name (as known to the
	// principal on the hub) both depend on the agent mode.
	if agentMode == argov1beta1api.AgentModeManaged {
		var cleanup func()
		nsAgent, cleanup = fixture.CreateNamespaceWithCleanupFunc(namespaceManagedAgent)
		registerCleanup(cleanup)
		agentName = managedAgentClusterName
	} else {
		var cleanup func()
		nsAgent, cleanup = fixture.CreateNamespaceWithCleanupFunc(namespaceAutonomousAgent)
		registerCleanup(cleanup)
		agentName = autonomousAgentClusterName
	}

	By("Create required secrets for " + string(agentMode) + " agent")

	// Client TLS material for the agent, signed by the same root CA the
	// principal trusts; the client certificate CN is the agent/cluster name.
	agentFixture.CreateRequiredAgentSecrets(agentFixture.AgentSecretsConfig{
		AgentNamespace:            nsAgent,
		PrincipalNamespaceName:    namespaceAgentPrincipal,
		PrincipalRootCASecretName: agentRootCASecretName,
		AgentRootCASecretName:     agentRootCASecretName,
		ClientTLSSecretName:       agentClientTLSSecretName,
		ClientCommonName:          agentName,
	})

	By("Create cluster registration secret for " + string(agentMode) + " agent")

	// Registers the spoke cluster with the principal; the resource proxy
	// endpoint lives in the principal's namespace on port 9090.
	agentFixture.CreateClusterRegistrationSecret(agentFixture.ClusterRegistrationSecretConfig{
		PrincipalNamespaceName:    namespaceAgentPrincipal,
		AgentNamespaceName:        nsAgent.Name,
		AgentName:                 agentName,
		ResourceProxyServiceName:  fmt.Sprintf("%s-agent-principal-resource-proxy", argoCDAgentInstanceNamePrincipal),
		ResourceProxyPort:         9090,
		PrincipalRootCASecretName: agentRootCASecretName,
		AgentTLSSecretName:        agentClientTLSSecretName,
	})

	By("Deploy " + string(agentMode) + " agent ArgoCD instance")

	argoCDInstanceAgent := buildArgoCDResource(argoCDAgentInstanceNameAgent, argov1beta1api.AgentComponentTypeAgent, agentMode, nsAgent)
	// Point the agent at the principal's in-cluster service DNS name.
	argoCDInstanceAgent.Spec.ArgoCDAgent.Agent.Client.PrincipalServerAddress = fmt.Sprintf("%s.%s.svc", deploymentNameAgentPrincipal, namespaceAgentPrincipal)
	Expect(k8sClient.Create(ctx, argoCDInstanceAgent)).To(Succeed())

	By("Verifying " + string(agentMode) + " agent deployment is in Ready state")

	Eventually(&appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: deploymentNameAgent, Namespace: nsAgent.Name}}, "120s", "5s").
		Should(deploymentFixture.HaveReadyReplicas(1))

	By("Verifying " + string(agentMode) + " agent logs contain expected messages")

	agentFixture.VerifyLogs(deploymentNameAgent, nsAgent.Name, agentStartupLogs)
}

// buildArgoCDResource builds the ArgoCD CR for either the principal or the
// agent, depending on the component type. For agents, the principal server
// address is left empty and is filled in by the caller.
func buildArgoCDResource(argoCDName string, componentType argov1beta1api.AgentComponentType,
	agentMode argov1beta1api.AgentMode, ns *corev1.Namespace) *argov1beta1api.ArgoCD {

	argoCD := &argov1beta1api.ArgoCD{
		ObjectMeta: metav1.ObjectMeta{
			Name:      argoCDName,
			Namespace: ns.Name,
		},
	}

	// Principal configurations: app controller disabled, mTLS auth against
	// the client certificate CN, and both spoke cluster namespaces allowed.
	if componentType == argov1beta1api.AgentComponentTypePrincipal {
		argoCD.Spec = argov1beta1api.ArgoCDSpec{
			Controller: argov1beta1api.ArgoCDApplicationControllerSpec{
				Enabled: ptr.To(false),
			},
			ArgoCDAgent: &argov1beta1api.ArgoCDAgentSpec{
				Principal: &argov1beta1api.PrincipalSpec{
					Enabled:  ptr.To(true),
					Auth:     "mtls:CN=([^,]+)",
					LogLevel: "info",
					Image:    common.ArgoCDAgentPrincipalDefaultImageName,
					Namespace: &argov1beta1api.PrincipalNamespaceSpec{
						AllowedNamespaces: []string{
							managedAgentClusterName,
							autonomousAgentClusterName,
						},
					},
					// Real (not self-generated-insecure) TLS and JWT material
					// is expected; the test creates these secrets itself.
					TLS: &argov1beta1api.PrincipalTLSSpec{
						InsecureGenerate: ptr.To(false),
					},
					JWT: &argov1beta1api.PrincipalJWTSpec{
						InsecureGenerate: ptr.To(false),
					},
					Server: &argov1beta1api.PrincipalServerSpec{
						KeepAliveMinInterval: "30s",
						Route: argov1beta1api.ArgoCDAgentPrincipalRouteSpec{
							Enabled: ptr.To(false),
						},
						Service: argov1beta1api.ArgoCDAgentPrincipalServiceSpec{
							Type: corev1.ServiceTypeLoadBalancer,
						},
					},
				},
				Agent: &argov1beta1api.AgentSpec{
					Enabled: ptr.To(false),
				},
			},
			SourceNamespaces: []string{
				managedAgentClusterName,
				autonomousAgentClusterName,
			},
		}
	} else {
		// Agent configurations: API server disabled, principal disabled,
		// mTLS client credentials, and mode taken from the caller.
		argoCD.Spec = argov1beta1api.ArgoCDSpec{
			Server: argov1beta1api.ArgoCDServerSpec{
				Enabled: ptr.To(false),
			},
			ArgoCDAgent: &argov1beta1api.ArgoCDAgentSpec{
				Principal: &argov1beta1api.PrincipalSpec{
					Enabled: ptr.To(false),
				},
				Agent: &argov1beta1api.AgentSpec{
					Enabled:  ptr.To(true),
					Creds:    "mtls:any",
					LogLevel: "info",
					Image:    common.ArgoCDAgentAgentDefaultImageName,
					Client: &argov1beta1api.AgentClientSpec{
						PrincipalServerAddress: "", // will be set in the test
						PrincipalServerPort:    "443",
						KeepAliveInterval:      "50s",
						Mode:                   string(agentMode),
					},
					Redis: &argov1beta1api.AgentRedisSpec{
						ServerAddress: fmt.Sprintf("%s-%s:%d", argoCDAgentInstanceNameAgent, "redis", common.ArgoCDDefaultRedisPort),
					},
					TLS: &argov1beta1api.AgentTLSSpec{
						SecretName:       agentClientTLSSecretName,
						RootCASecretName: agentRootCASecretName,
						Insecure:         ptr.To(false),
					},
				},
			},
		}
	}

	return argoCD
}

// This function builds the AppProject resource for the managed or autonomous agent.
+func buildAppProjectResource(nsName string, agentMode argov1beta1api.AgentMode) *argocdv1alpha1.AppProject { + appProject := &argocdv1alpha1.AppProject{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentAppProjectName, + Namespace: nsName, + }, + Spec: argocdv1alpha1.AppProjectSpec{ + ClusterResourceWhitelist: []metav1.GroupKind{{ + Group: "*", + Kind: "*", + }}, + SourceRepos: []string{"*"}, + }, + } + + if agentMode == argov1beta1api.AgentModeManaged { + appProject.Spec.SourceNamespaces = []string{ + managedAgentClusterName, + autonomousAgentClusterName, + } + appProject.Spec.Destinations = []argocdv1alpha1.ApplicationDestination{{ + Name: managedAgentClusterName, + Namespace: managedAgentApplicationNamespace, + Server: "*", + }} + } else { + appProject.Spec.Destinations = []argocdv1alpha1.ApplicationDestination{{ + Namespace: autonomousAgentApplicationNamespace, + Server: "*", + }} + } + return appProject +} + +// This function builds the Application resource for the managed or autonomous agent. 
+func buildApplicationResource(applicationName, nsName, agentName, argocdInstanceName string, + agentMode argov1beta1api.AgentMode) *argocdv1alpha1.Application { + + application := &argocdv1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: applicationName, + Namespace: nsName, + }, + Spec: argocdv1alpha1.ApplicationSpec{ + Project: agentAppProjectName, + Source: &argocdv1alpha1.ApplicationSource{ + RepoURL: "https://github.com/redhat-developer/openshift-gitops-getting-started", + TargetRevision: "HEAD", + Path: "app", + }, + SyncPolicy: &argocdv1alpha1.SyncPolicy{ + Automated: &argocdv1alpha1.SyncPolicyAutomated{ + Prune: true, + SelfHeal: true, + }, + ManagedNamespaceMetadata: &argocdv1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{ + "argocd.argoproj.io/managed-by": argocdInstanceName, + }, + }, + }, + }, + } + + // Set the destination based on the agent mode + if agentMode == argov1beta1api.AgentModeManaged { + application.Spec.Destination = argocdv1alpha1.ApplicationDestination{ + Name: agentName, + Namespace: managedAgentApplicationNamespace, + } + } else { + application.Spec.Destination = argocdv1alpha1.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: autonomousAgentApplicationNamespace, + } + } + return application +} From 66785bf2c0e7642a908d892d40ea4702ab7d77c9 Mon Sep 17 00:00:00 2001 From: Jayendra Parsai Date: Thu, 15 Jan 2026 17:48:22 +0530 Subject: [PATCH 2/2] chore: add e2e tests for agent Assisted by: Cursor Signed-off-by: Jayendra Parsai --- ...51_validate_argocd_agent_principal_test.go | 110 ++++++++++-------- .../1-052_validate_argocd_agent_agent_test.go | 59 +++++----- ...e_argocd_agent_principal_connected_test.go | 10 -- 3 files changed, 93 insertions(+), 86 deletions(-) diff --git a/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go b/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go index 1daf6518e..a11f2b05b 100644 --- 
a/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go +++ b/test/openshift/e2e/ginkgo/sequential/1-051_validate_argocd_agent_principal_test.go @@ -31,6 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" argov1beta1api "github.com/argoproj-labs/argocd-operator/api/v1beta1" + "github.com/argoproj-labs/argocd-operator/common" "github.com/argoproj-labs/argocd-operator/controllers/argocdagent" "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture" agentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/agent" @@ -41,6 +42,15 @@ import ( ) var _ = Describe("GitOps Operator Sequential E2E Tests", func() { + + const ( + argoCDName = "example" + argoCDAgentPrincipalName = "example-agent-principal" // argoCDName + "-agent-principal" + principalMetricsServiceFmt = "%s-agent-principal-metrics" + principalRedisProxyServiceFmt = "%s-agent-principal-redisproxy" + principalHealthzServiceFmt = "%s-agent-principal-healthz" + ) + Context("1-051_validate_argocd_agent_principal", func() { var ( @@ -75,7 +85,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { // Define ArgoCD CR with principal enabled argoCD = &argov1beta1api.ArgoCD{ ObjectMeta: metav1.ObjectMeta{ - Name: argoCDAgentInstanceNamePrincipal, + Name: argoCDName, Namespace: ns.Name, }, Spec: argov1beta1api.ArgoCDSpec{ @@ -113,34 +123,34 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { // Define required resources for principal pod serviceAccount = &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgentPrincipal, + Name: argoCDAgentPrincipalName, Namespace: ns.Name, }, } role = &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgentPrincipal, + Name: argoCDAgentPrincipalName, Namespace: ns.Name, }, } roleBinding = &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgentPrincipal, + Name: argoCDAgentPrincipalName, 
Namespace: ns.Name, }, } clusterRole = &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s-agent-principal", argoCDAgentInstanceNamePrincipal, ns.Name), + Name: fmt.Sprintf("%s-%s-agent-principal", argoCDName, ns.Name), }, } clusterRoleBinding = &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s-agent-principal", argoCDAgentInstanceNamePrincipal, ns.Name), + Name: fmt.Sprintf("%s-%s-agent-principal", argoCDName, ns.Name), }, } @@ -150,32 +160,32 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { PrincipalTLSSecretName: agentPrincipalTLSSecretName, RootCASecretName: agentRootCASecretName, ResourceProxyTLSSecretName: agentResourceProxyTLSSecretName, - RedisInitialPasswordSecretName: fmt.Sprintf("%s-redis-initial-password", argoCDAgentInstanceNamePrincipal), + RedisInitialPasswordSecretName: "example-redis-initial-password", } - resourceProxyServiceName = fmt.Sprintf("%s-agent-principal-resource-proxy", argoCDAgentInstanceNamePrincipal) + resourceProxyServiceName = fmt.Sprintf("%s-agent-principal-resource-proxy", argoCDName) serviceNames = []string{ - deploymentNameAgentPrincipal, - fmt.Sprintf("%s-agent-principal-metrics", argoCDAgentInstanceNamePrincipal), - fmt.Sprintf("%s-redis", argoCDAgentInstanceNamePrincipal), - fmt.Sprintf("%s-repo-server", argoCDAgentInstanceNamePrincipal), - fmt.Sprintf("%s-server", argoCDAgentInstanceNamePrincipal), - fmt.Sprintf("%s-agent-principal-redisproxy", argoCDAgentInstanceNamePrincipal), + argoCDAgentPrincipalName, + fmt.Sprintf(principalMetricsServiceFmt, argoCDName), + fmt.Sprintf("%s-redis", argoCDName), + fmt.Sprintf("%s-repo-server", argoCDName), + fmt.Sprintf("%s-server", argoCDName), + fmt.Sprintf(principalRedisProxyServiceFmt, argoCDName), resourceProxyServiceName, - fmt.Sprintf("%s-agent-principal-healthz", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf(principalHealthzServiceFmt, argoCDName), } - deploymentNames = 
[]string{fmt.Sprintf("%s-redis", argoCDAgentInstanceNamePrincipal), fmt.Sprintf("%s-repo-server", argoCDAgentInstanceNamePrincipal), fmt.Sprintf("%s-server", argoCDAgentInstanceNamePrincipal)} + deploymentNames = []string{fmt.Sprintf("%s-redis", argoCDName), fmt.Sprintf("%s-repo-server", argoCDName), fmt.Sprintf("%s-server", argoCDName)} principalDeployment = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgentPrincipal, + Name: argoCDAgentPrincipalName, Namespace: ns.Name, }, } principalRoute = &routev1.Route{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-agent-principal", argoCDAgentInstanceNamePrincipal), + Name: fmt.Sprintf("%s-agent-principal", argoCDName), Namespace: ns.Name, }, } @@ -193,7 +203,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { argocdagent.EnvArgoCDPrincipalAuth: "mtls:CN=([^,]+)", argocdagent.EnvArgoCDPrincipalEnableResourceProxy: "true", argocdagent.EnvArgoCDPrincipalKeepAliveMinInterval: "30s", - argocdagent.EnvArgoCDPrincipalRedisServerAddress: fmt.Sprintf("%s-%s:%d", argoCDAgentInstanceNamePrincipal, "redis", ArgoCDDefaultRedisPort), + argocdagent.EnvArgoCDPrincipalRedisServerAddress: fmt.Sprintf("%s-%s:%d", argoCDName, "redis", common.ArgoCDDefaultRedisPort), argocdagent.EnvArgoCDPrincipalRedisCompressionType: "gzip", argocdagent.EnvArgoCDPrincipalLogFormat: "text", argocdagent.EnvArgoCDPrincipalEnableWebSocket: "false", @@ -206,8 +216,8 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { principalResources = agentFixture.PrincipalResources{ PrincipalNamespaceName: ns.Name, - ArgoCDAgentPrincipalName: deploymentNameAgentPrincipal, - ArgoCDName: argoCDAgentInstanceNamePrincipal, + ArgoCDAgentPrincipalName: argoCDAgentPrincipalName, + ArgoCDName: argoCDName, ServiceAccount: serviceAccount, Role: role, RoleBinding: roleBinding, @@ -216,11 +226,11 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { PrincipalDeployment: principalDeployment, 
PrincipalRoute: principalRoute, ServicesToDelete: []string{ - deploymentNameAgentPrincipal, - fmt.Sprintf("%s-agent-principal-metrics", argoCDAgentInstanceNamePrincipal), - fmt.Sprintf("%s-agent-principal-redisproxy", argoCDAgentInstanceNamePrincipal), + argoCDAgentPrincipalName, + fmt.Sprintf(principalMetricsServiceFmt, argoCDName), + fmt.Sprintf(principalRedisProxyServiceFmt, argoCDName), resourceProxyServiceName, - fmt.Sprintf("%s-agent-principal-healthz", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf(principalHealthzServiceFmt, argoCDName), }, } }) @@ -239,7 +249,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { createRequiredSecrets := func(namespace *corev1.Namespace, additionalPrincipalSANs ...string) { agentFixture.CreateRequiredSecrets(agentFixture.PrincipalSecretsConfig{ PrincipalNamespaceName: namespace.Name, - PrincipalServiceName: deploymentNameAgentPrincipal, + PrincipalServiceName: argoCDAgentPrincipalName, ResourceProxyServiceName: resourceProxyServiceName, JWTSecretName: secretNames.JWTSecretName, PrincipalTLSSecretName: secretNames.PrincipalTLSSecretName, @@ -257,8 +267,8 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { agentFixture.VerifyExpectedResourcesExist(agentFixture.VerifyExpectedResourcesExistParams{ Namespace: namespace, - ArgoCDAgentPrincipalName: deploymentNameAgentPrincipal, - ArgoCDName: argoCDAgentInstanceNamePrincipal, + ArgoCDAgentPrincipalName: argoCDAgentPrincipalName, + ArgoCDName: argoCDName, ServiceAccount: serviceAccount, Role: role, RoleBinding: roleBinding, @@ -292,7 +302,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Verify principal has the custom image we specified in ArgoCD CR") - container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgentPrincipal, *principalDeployment) + container := deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentPrincipalName, *principalDeployment) Expect(container).ToNot(BeNil()) 
Expect(container.Image).To(Equal("quay.io/user/argocd-agent:v1")) @@ -307,7 +317,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Disable principal") - Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDName, Namespace: ns.Name}, argoCD)).To(Succeed()) argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { ac.Spec.ArgoCDAgent.Principal.Enabled = ptr.To(false) @@ -333,9 +343,9 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Verify principal uses the default agent image") - container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgentPrincipal, *principalDeployment) + container := deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentPrincipalName, *principalDeployment) Expect(container).ToNot(BeNil()) - Expect(container.Image).To(Equal(ArgoCDAgentPrincipalDefaultImageName)) + Expect(container.Image).To(Equal(common.ArgoCDAgentPrincipalDefaultImageName)) By("Create required secrets and certificates for principal pod to start properly") @@ -343,7 +353,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Verify principal pod starts successfully by checking logs") - agentFixture.VerifyLogs(deploymentNameAgentPrincipal, ns.Name, []string{ + agentFixture.VerifyLogs(argoCDAgentPrincipalName, ns.Name, []string{ "Starting metrics server", "Redis proxy started", "Application informer synced and ready", @@ -370,7 +380,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Disable principal") - Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDName, Namespace: ns.Name}, argoCD)).To(Succeed()) argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { ac.Spec.ArgoCDAgent.Principal.Enabled 
= nil @@ -394,7 +404,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Verify principal has the custom image we specified in ArgoCD CR") - container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgentPrincipal, *principalDeployment) + container := deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentPrincipalName, *principalDeployment) Expect(container).ToNot(BeNil()) Expect(container.Image).To(Equal("quay.io/argoprojlabs/argocd-agent:v0.5.0")) @@ -407,7 +417,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Update ArgoCD CR with new configuration") - Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDName, Namespace: ns.Name}, argoCD)).To(Succeed()) argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { @@ -453,12 +463,12 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { Eventually( func() bool { // Fetch the latest deployment from the cluster - err := k8sClient.Get(ctx, client.ObjectKey{Name: deploymentNameAgentPrincipal, Namespace: ns.Name}, principalDeployment) + err := k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentPrincipalName, Namespace: ns.Name}, principalDeployment) if err != nil { GinkgoWriter.Println("Error getting deployment for image check: ", err) return false } - container = deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgentPrincipal, *principalDeployment) + container = deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentPrincipalName, *principalDeployment) if container == nil { return false } @@ -553,7 +563,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Disable route while keeping principal enabled") - Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + Expect(k8sClient.Get(ctx, 
client.ObjectKey{Name: argoCDName, Namespace: ns.Name}, argoCD)).To(Succeed()) argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { ac.Spec.ArgoCDAgent.Principal.Server.Route.Enabled = ptr.To(false) }) @@ -569,11 +579,11 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { Eventually(principalDeployment).Should(k8sFixture.ExistByName()) for _, serviceName := range []string{ - fmt.Sprintf("%s-agent-principal", argoCDAgentInstanceNamePrincipal), - fmt.Sprintf("%s-agent-principal-metrics", argoCDAgentInstanceNamePrincipal), - fmt.Sprintf("%s-agent-principal-redisproxy", argoCDAgentInstanceNamePrincipal), - fmt.Sprintf("%s-agent-principal-resource-proxy", argoCDAgentInstanceNamePrincipal), - fmt.Sprintf("%s-agent-principal-healthz", argoCDAgentInstanceNamePrincipal), + fmt.Sprintf("%s-agent-principal", argoCDName), + fmt.Sprintf(principalMetricsServiceFmt, argoCDName), + fmt.Sprintf(principalRedisProxyServiceFmt, argoCDName), + fmt.Sprintf("%s-agent-principal-resource-proxy", argoCDName), + fmt.Sprintf(principalHealthzServiceFmt, argoCDName), } { service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -586,7 +596,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Re-enable route") - Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDName, Namespace: ns.Name}, argoCD)).To(Succeed()) argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { ac.Spec.ArgoCDAgent.Principal.Server.Route.Enabled = ptr.To(true) }) @@ -615,7 +625,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { principalService := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgentPrincipal, + Name: argoCDAgentPrincipalName, Namespace: ns.Name, }, } @@ -640,7 +650,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { principalService := &corev1.Service{ 
ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgentPrincipal, + Name: argoCDAgentPrincipalName, Namespace: ns.Name, }, } @@ -665,7 +675,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { principalService := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgentPrincipal, + Name: argoCDAgentPrincipalName, Namespace: ns.Name, }, } @@ -674,7 +684,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Update service type to LoadBalancer") - Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNamePrincipal, Namespace: ns.Name}, argoCD)).To(Succeed()) + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDName, Namespace: ns.Name}, argoCD)).To(Succeed()) argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { ac.Spec.ArgoCDAgent.Principal.Server.Service.Type = corev1.ServiceTypeLoadBalancer }) @@ -682,7 +692,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Verify principal service type is updated to LoadBalancer") Eventually(func() corev1.ServiceType { - err := k8sClient.Get(ctx, client.ObjectKey{Name: deploymentNameAgentPrincipal, Namespace: ns.Name}, principalService) + err := k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentPrincipalName, Namespace: ns.Name}, principalService) if err != nil { return "" } diff --git a/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go b/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go index 743cb4e89..1daf0001a 100644 --- a/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go +++ b/test/openshift/e2e/ginkgo/sequential/1-052_validate_argocd_agent_agent_test.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" argov1beta1api "github.com/argoproj-labs/argocd-operator/api/v1beta1" + "github.com/argoproj-labs/argocd-operator/common" "github.com/argoproj-labs/argocd-operator/controllers/argocdagent/agent" 
"github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture" argocdFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/argocd" @@ -39,6 +40,12 @@ import ( ) var _ = Describe("GitOps Operator Sequential E2E Tests", func() { + + const ( + argoCDName = "example" + argoCDAgentAgentName = "example-agent-agent" // argoCDName + "-agent-agent" + ) + Context("1-052_validate_argocd_agent_agent", func() { var ( @@ -70,7 +77,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { // Define ArgoCD CR with agent enabled argoCD = &argov1beta1api.ArgoCD{ ObjectMeta: metav1.ObjectMeta{ - Name: argoCDAgentInstanceNameAgent, + Name: argoCDName, Namespace: ns.Name, }, Spec: argov1beta1api.ArgoCDSpec{ @@ -100,7 +107,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { Insecure: ptr.To(false), }, Redis: &argov1beta1api.AgentRedisSpec{ - ServerAddress: fmt.Sprintf("%s-%s:%d", argoCDAgentInstanceNameAgent, "redis", ArgoCDDefaultRedisPort), + ServerAddress: fmt.Sprintf("%s-%s:%d", argoCDName, "redis", common.ArgoCDDefaultRedisPort), }, }, }, @@ -110,34 +117,34 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { // Define required resources for agent pod serviceAccount = &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgent, + Name: argoCDAgentAgentName, Namespace: ns.Name, }, } role = &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgent, + Name: argoCDAgentAgentName, Namespace: ns.Name, }, } roleBinding = &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgent, + Name: argoCDAgentAgentName, Namespace: ns.Name, }, } clusterRole = &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s-agent-agent", argoCDAgentInstanceNameAgent, ns.Name), + Name: fmt.Sprintf("%s-%s-agent-agent", argoCDName, ns.Name), }, } clusterRoleBinding = &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: 
fmt.Sprintf("%s-%s-agent-agent", argoCDAgentInstanceNameAgent, ns.Name), + Name: fmt.Sprintf("%s-%s-agent-agent", argoCDName, ns.Name), }, } @@ -145,19 +152,19 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { secretNames = []string{ agentClientTLSSecretName, agentRootCASecretName, - fmt.Sprintf("%s-redis-initial-password", argoCDAgentInstanceNameAgent), + "example-redis-initial-password", } serviceNames = []string{ - fmt.Sprintf("%s-agent-agent-metrics", argoCDAgentInstanceNameAgent), - fmt.Sprintf("%s-agent-agent-healthz", argoCDAgentInstanceNameAgent), - fmt.Sprintf("%s-redis", argoCDAgentInstanceNameAgent), + fmt.Sprintf("%s-agent-agent-metrics", argoCDName), + fmt.Sprintf("%s-agent-agent-healthz", argoCDName), + fmt.Sprintf("%s-redis", argoCDName), } - deploymentNames = []string{fmt.Sprintf("%s-redis", argoCDAgentInstanceNameAgent)} + deploymentNames = []string{fmt.Sprintf("%s-redis", argoCDName)} agentDeployment = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentNameAgent, + Name: argoCDAgentAgentName, Namespace: ns.Name, }, } @@ -177,7 +184,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { agent.EnvArgoCDAgentEnableWebSocket: "false", agent.EnvArgoCDAgentEnableCompression: "false", agent.EnvArgoCDAgentKeepAliveInterval: "30s", - agent.EnvArgoCDAgentRedisAddress: fmt.Sprintf("%s-%s:%d", argoCDAgentInstanceNameAgent, "redis", ArgoCDDefaultRedisPort), + agent.EnvArgoCDAgentRedisAddress: fmt.Sprintf("%s-%s:%d", argoCDName, "redis", common.ArgoCDDefaultRedisPort), agent.EnvArgoCDAgentEnableResourceProxy: "true", } }) @@ -245,8 +252,8 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { Eventually(agentDeployment).Should(k8sFixture.ExistByName()) Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/component", string(argov1beta1api.AgentComponentTypeAgent))) - Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/managed-by", 
argoCDAgentInstanceNameAgent)) - Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/name", deploymentNameAgent)) + Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/managed-by", argoCDName)) + Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/name", argoCDAgentAgentName)) Eventually(agentDeployment).Should(k8sFixture.HaveLabelWithValue("app.kubernetes.io/part-of", "argocd-agent")) } @@ -262,7 +269,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { Eventually(clusterRoleBinding).Should(k8sFixture.NotExistByName()) Eventually(agentDeployment).Should(k8sFixture.NotExistByName()) - for _, serviceName := range []string{fmt.Sprintf("%s-agent-agent-metrics", argoCDAgentInstanceNameAgent), fmt.Sprintf("%s-agent-agent-healthz", argoCDAgentInstanceNameAgent)} { + for _, serviceName := range []string{fmt.Sprintf("%s-agent-agent-metrics", argoCDName), fmt.Sprintf("%s-agent-agent-healthz", argoCDName)} { service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, @@ -288,7 +295,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Verify agent has the custom image we specified in ArgoCD CR") - container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgent, *agentDeployment) + container := deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentAgentName, *agentDeployment) Expect(container).ToNot(BeNil()) Expect(container.Image).To(Equal("quay.io/user/argocd-agent:v1")) @@ -303,7 +310,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Disable agent") - Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNameAgent, Namespace: ns.Name}, argoCD)).To(Succeed()) + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDName, Namespace: ns.Name}, argoCD)).To(Succeed()) argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { ac.Spec.ArgoCDAgent.Agent.Enabled = 
ptr.To(false) @@ -329,9 +336,9 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Verify agent uses the default agent image") - container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgent, *agentDeployment) + container := deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentAgentName, *agentDeployment) Expect(container).ToNot(BeNil()) - Expect(container.Image).To(Equal(ArgoCDAgentAgentDefaultImageName)) + Expect(container.Image).To(Equal(common.ArgoCDAgentAgentDefaultImageName)) By("Verify environment variables are set correctly") @@ -350,7 +357,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Disable agent") - Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNameAgent, Namespace: ns.Name}, argoCD)).To(Succeed()) + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDName, Namespace: ns.Name}, argoCD)).To(Succeed()) argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { ac.Spec.ArgoCDAgent.Agent.Enabled = nil @@ -374,7 +381,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Verify agent has the custom image we specified in ArgoCD CR") - container := deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgent, *agentDeployment) + container := deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentAgentName, *agentDeployment) Expect(container).ToNot(BeNil()) Expect(container.Image).To(Equal("quay.io/argoprojlabs/argocd-agent:v0.5.0")) @@ -387,7 +394,7 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { By("Update ArgoCD CR with new configuration") - Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentInstanceNameAgent, Namespace: ns.Name}, argoCD)).To(Succeed()) + Expect(k8sClient.Get(ctx, client.ObjectKey{Name: argoCDName, Namespace: ns.Name}, argoCD)).To(Succeed()) argocdFixture.Update(argoCD, func(ac *argov1beta1api.ArgoCD) { @@ -414,12 +421,12 @@ var _ = Describe("GitOps Operator Sequential E2E 
Tests", func() { Eventually( func() bool { // Fetch the latest deployment from the cluster - err := k8sClient.Get(ctx, client.ObjectKey{Name: deploymentNameAgent, Namespace: ns.Name}, agentDeployment) + err := k8sClient.Get(ctx, client.ObjectKey{Name: argoCDAgentAgentName, Namespace: ns.Name}, agentDeployment) if err != nil { GinkgoWriter.Println("Error getting deployment for image check: ", err) return false } - container = deploymentFixture.GetTemplateSpecContainerByName(deploymentNameAgent, *agentDeployment) + container = deploymentFixture.GetTemplateSpecContainerByName(argoCDAgentAgentName, *agentDeployment) if container == nil { return false } diff --git a/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go b/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go index 216b09c86..8ecc3caed 100644 --- a/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go +++ b/test/openshift/e2e/ginkgo/sequential/1-053_validate_argocd_agent_principal_connected_test.go @@ -121,15 +121,6 @@ const ( agentRootCASecretName = "argocd-agent-ca" agentClientTLSSecretName = "argocd-agent-client-tls" agentResourceProxyTLSSecretName = "argocd-agent-resource-proxy-tls" - - // Redis port - ArgoCDDefaultRedisPort = 6379 - - // ArgoCDAgentAgentDefaultImageName is the default image name for the ArgoCD agent's agent component. - ArgoCDAgentAgentDefaultImageName = "quay.io/argoprojlabs/argocd-agent:v0.5.2" - - // ArgoCDAgentPrincipalDefaultImageName is the default image name for the ArgoCD agent's principal component. 
- ArgoCDAgentPrincipalDefaultImageName = "quay.io/argoprojlabs/argocd-agent:v0.5.2" ) var ( @@ -222,7 +213,6 @@ var _ = Describe("GitOps Operator Sequential E2E Tests", func() { _, cleanupFuncClusterAutonomous := fixture.CreateNamespaceWithCleanupFunc(autonomousAgentClusterName) registerCleanup(cleanupFuncClusterAutonomous) - // Create namespaces with managed-by label for the agent's application controller to deploy resources _, cleanupFuncManagedApplication := fixture.CreateManagedNamespaceWithCleanupFunc(managedAgentApplicationNamespace, namespaceManagedAgent) registerCleanup(cleanupFuncManagedApplication)