From 7ea2f8c750a3b6cfd52c63956290c5dee464762b Mon Sep 17 00:00:00 2001 From: Jakub Koterba Date: Wed, 25 Mar 2026 21:07:35 +0100 Subject: [PATCH 1/2] Added needed infra setup + pgcluster integration test base --- .../controller/postgrescluster_controller.go | 2 + .../postgrescluster_controller_test.go | 263 +++++++++++++++--- internal/controller/suite_test.go | 207 +++++++------- 3 files changed, 320 insertions(+), 152 deletions(-) diff --git a/internal/controller/postgrescluster_controller.go b/internal/controller/postgrescluster_controller.go index dfa1f7eaf..6a2181afc 100644 --- a/internal/controller/postgrescluster_controller.go +++ b/internal/controller/postgrescluster_controller.go @@ -142,6 +142,8 @@ func cnpgPoolerPredicator() predicate.Predicate { } // secretPredicator triggers only on owner reference changes. + +// secretPredicator filters Secret events to trigger reconciles on creation, deletion, or owner reference changes. func secretPredicator() predicate.Predicate { return predicate.Funcs{ CreateFunc: func(event.CreateEvent) bool { return true }, diff --git a/internal/controller/postgrescluster_controller_test.go b/internal/controller/postgrescluster_controller_test.go index c0f3493d9..0f690754b 100644 --- a/internal/controller/postgrescluster_controller_test.go +++ b/internal/controller/postgrescluster_controller_test.go @@ -18,67 +18,248 @@ package controller import ( "context" + "fmt" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" enterprisev4 "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/pkg/postgresql/cluster/core" ) var _ = Describe("PostgresCluster Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" - ctx := context.Background() + var ( + ctx context.Context + namespace string + clusterName string + className string + reconciler *PostgresClusterReconciler + req reconcile.Request + cnpg *cnpgv1.Cluster + ) + + BeforeEach(func() { + specLine := CurrentSpecReport().LeafNodeLocation.LineNumber + nameSuffix := fmt.Sprintf("%d-%d-%d", GinkgoParallelProcess(), GinkgoRandomSeed(), specLine) + + ctx = context.Background() + namespace = "default" + clusterName = "postgresql-cluster-dev-" + nameSuffix + className = "postgresql-dev-" + nameSuffix + cnpg = &cnpgv1.Cluster{} + + // Arrange: class defaults used by getMergedConfig() + postgresVersion := "15.10" + instances := int32(2) + storage := resource.MustParse("1Gi") + poolerEnabled := false + + class := &enterprisev4.PostgresClusterClass{ + ObjectMeta: metav1.ObjectMeta{Name: className}, + Spec: enterprisev4.PostgresClusterClassSpec{ + Provisioner: "postgresql.cnpg.io", + Config: &enterprisev4.PostgresClusterClassConfig{ + Instances: &instances, + Storage: &storage, + PostgresVersion: &postgresVersion, + ConnectionPoolerEnabled: &poolerEnabled, + }, + }, + } + Expect(k8sClient.Create(ctx, class)).To(Succeed()) + + pc := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{Name: clusterName, Namespace: namespace}, + Spec: enterprisev4.PostgresClusterSpec{ + Class: className, + ClusterDeletionPolicy: &[]string{"Delete"}[0], + }, + } + Expect(k8sClient.Create(ctx, pc)).To(Succeed()) + + reconciler = 
&PostgresClusterReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + req = reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: namespace}} + }) + + JustBeforeEach(func() { + By("Reconciling the created resource") + result, err := reconciler.Reconcile(ctx, req) + Expect(err).NotTo(HaveOccurred()) + Expect(result.RequeueAfter).To(BeZero()) + }) - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed + AfterEach(func() { + By("Deleting PostgresCluster and letting reconcile run finalizer cleanup") + key := types.NamespacedName{Name: clusterName, Namespace: namespace} + pc := &enterprisev4.PostgresCluster{} + + // Best-effort delete (object might already be gone in some specs) + err := k8sClient.Get(ctx, key, pc) + if err == nil { + Expect(k8sClient.Delete(ctx, pc)).To(Succeed()) + } else { + Expect(apierrors.IsNotFound(err)).To(BeTrue()) } - postgresCluster := &enterprisev4.PostgresCluster{} - - BeforeEach(func() { - By("creating the custom resource for the Kind PostgresCluster") - err := k8sClient.Get(ctx, typeNamespacedName, postgresCluster) - if err != nil && errors.IsNotFound(err) { - resource := &enterprisev4.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", - }, - // TODO(user): Specify other spec details if needed. + + // Drive delete reconcile path until finalizer is removed and object disappears + Eventually(func() bool { + _, recErr := reconciler.Reconcile(ctx, req) + if recErr != nil { + // Some envtest runs may not have CNPG CRDs installed in the API server. + // In that case, remove finalizer directly so fixture teardown remains deterministic. 
+ if meta.IsNoMatchError(recErr) { + current := &enterprisev4.PostgresCluster{} + getErr := k8sClient.Get(ctx, key, current) + if apierrors.IsNotFound(getErr) { + return true + } + if getErr != nil { + return false + } + controllerutil.RemoveFinalizer(current, core.PostgresClusterFinalizerName) + if err := k8sClient.Update(ctx, current); err != nil && !apierrors.IsNotFound(err) { + return false + } + if err := k8sClient.Delete(ctx, current); err != nil && !apierrors.IsNotFound(err) { + return false + } + } else { + return false } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } - }) + getErr := k8sClient.Get(ctx, key, &enterprisev4.PostgresCluster{}) + return apierrors.IsNotFound(getErr) + }, "10s", "500ms").Should(BeTrue()) - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. - resource := &enterprisev4.PostgresCluster{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) + By("Cleaning up PostgresClusterClass fixture") + class := &enterprisev4.PostgresClusterClass{} + classKey := types.NamespacedName{Name: className} // cluster-scoped CR + err = k8sClient.Get(ctx, classKey, class) + if err == nil { + Expect(k8sClient.Delete(ctx, class)).To(Succeed()) + } else { + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + } + }) - By("Cleanup the specific resource instance PostgresCluster") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) - }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &PostgresClusterReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - } + Context("Happy path and convergence", func() { + pc := &enterprisev4.PostgresCluster{} + It("PC-01 creates managed resources and status refs", func() { + By("creating CNPG cluster via reconcile and avaiting healthy") + Eventually(func() error { + _, err := reconciler.Reconcile(ctx, req) + if err != nil { + return err 
+ } + if err := k8sClient.Get(ctx, req.NamespacedName, cnpg); err != nil { + return err + } + cnpg.Status.Phase = cnpgv1.PhaseHealthy + return k8sClient.Status().Update(ctx, cnpg) + }, "10s", "250ms").Should(Succeed()) + + By("reconciling until managed resources are published in status") + Eventually(func() bool { + _, err := reconciler.Reconcile(ctx, req) + if err != nil { + return false + } + current := &enterprisev4.PostgresCluster{} + if err := k8sClient.Get(ctx, req.NamespacedName, current); err != nil { + return false + } + return current.Status.Resources != nil && + current.Status.Resources.SuperUserSecretRef != nil && + current.Status.Resources.ConfigMapRef != nil + }, "20s", "250ms").Should(BeTrue()) - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ By("asserting finalizer contract") + pc := &enterprisev4.PostgresCluster{} + Expect(k8sClient.Get(ctx, req.NamespacedName, pc)).To(Succeed()) + Expect(controllerutil.ContainsFinalizer(pc, core.PostgresClusterFinalizerName)).To(BeTrue()) + + By("asserting status references are published") + Expect(pc.Status.Resources).NotTo(BeNil()) + Expect(pc.Status.Resources.SuperUserSecretRef).NotTo(BeNil()) + Expect(pc.Status.Resources.ConfigMapRef).NotTo(BeNil()) + + By("asserting Secret ownership and existence") + secret := &corev1.Secret{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: pc.Status.Resources.SuperUserSecretRef.Name, Namespace: namespace, + }, secret)).To(Succeed()) + Expect(metav1.IsControlledBy(secret, pc)).To(BeTrue()) + + By("asserting CNPG Cluster projection and ownership") + cnpg := &cnpgv1.Cluster{} + Expect(k8sClient.Get(ctx, req.NamespacedName, cnpg)).To(Succeed()) + Expect(metav1.IsControlledBy(cnpg, pc)).To(BeTrue()) + Expect(cnpg.Spec.Instances).To(Equal(2)) + Expect(cnpg.Spec.ImageName).To(ContainSubstring("postgresql:15.10")) + Expect(cnpg.Spec.StorageConfiguration.Size).To(Equal("1Gi")) + + By("asserting ConfigMap contract consumed by clients") + cm := &corev1.ConfigMap{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: pc.Status.Resources.ConfigMapRef.Name, Namespace: namespace, + }, cm)).To(Succeed()) + Expect(metav1.IsControlledBy(cm, pc)).To(BeTrue()) + Expect(cm.Data).To(HaveKeyWithValue("DEFAULT_CLUSTER_PORT", "5432")) + Expect(cm.Data).To(HaveKey("SUPER_USER_SECRET_REF")) + Expect(cm.Data).To(HaveKey("CLUSTER_RW_ENDPOINT")) + }) + It("PC-02 adds finalizer on reconcile", func() { + Expect(k8sClient.Get(ctx, req.NamespacedName, pc)).To(Succeed()) + Expect(pc.ObjectMeta.Finalizers).To(ContainElement(core.PostgresClusterFinalizerName)) }) + It("PC-07 is idempotent across repeated reconciles", func() {}) }) + + Context("Deletion and finalizer", func() { + It("PC-03 Delete policy removes children and finalizer", func() {}) + 
It("PC-04 Retain policy preserves children and removes ownerRefs", func() {}) + }) + + Context("Failure and drift", func() { + It("PC-05 fails when PostgresClusterClass is missing", func() {}) + It("PC-06 restores drifted managed spec", func() {}) + }) + + Context("Predicates", func() { + It("PC-08 triggers on generation/finalizer/deletion changes", func() {}) + It("PC-09 ignores no-op updates", func() {}) + }) + + // Context("When reconciling a resource", func() { + + // It("should successfully reconcile the resource", func() { + // By("Reconciling the created resource") + // // controllerReconciler := &PostgresClusterReconciler{ + // // Client: k8sClient, + // // Scheme: k8sClient.Scheme(), + // // } + + // // _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + // // NamespacedName: typeNamespacedName, + // // }) + // err := errors.New("test error") + // Expect(err).NotTo(HaveOccurred()) + + // }) + // }) }) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 142a8720c..30cb99f64 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2018-2022 Splunk Inc. All rights reserved. +Copyright 2026. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,155 +19,140 @@ package controller import ( "context" "fmt" + "os" "path/filepath" + "sort" "testing" - "time" + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "go.uber.org/zap/zapcore" + + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" - - enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - //+kubebuilder:scaffold:imports + enterprisev4 "github.com/splunk/splunk-operator/api/v4" + // +kubebuilder:scaffold:imports ) -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment -var k8sManager ctrl.Manager +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. -func TestAPIs(t *testing.T) { +var ( + ctx context.Context + cancel context.CancelFunc + testEnv *envtest.Environment + cfg *rest.Config + k8sClient client.Client +) + +func TestControllers(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") } -var _ = BeforeSuite(func(ctx context.Context) { - opts := zap.Options{ - Development: true, - TimeEncoder: zapcore.RFC3339NanoTimeEncoder, - } - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.UseFlagOptions(&opts))) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - By("bootstrapping test environment") + ctx, cancel = context.WithCancel(context.TODO()) + + var err error + err = enterprisev4.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = cnpgv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + By("bootstrapping test environment") + cnpgCRDDirectory, err := getCNPGCRDDirectory() + Expect(err).NotTo(HaveOccurred()) testEnv = &envtest.Environment{ - CRDDirectoryPaths: 
[]string{filepath.Join("..", "..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "config", "crd", "bases"), + cnpgCRDDirectory, + }, ErrorIfCRDPathMissing: true, } - var err error + // Retrieve the first found binary directory to allow running tests from IDEs + if getFirstFoundEnvTestBinaryDir() != "" { + testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() + } // cfg is defined in this file globally. cfg, err = testEnv.Start() Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = enterpriseApiV3.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = enterpriseApiV3.AddToScheme(clientgoscheme.Scheme) + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) - err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() Expect(err).NotTo(HaveOccurred()) +}) - //+kubebuilder:scaffold:scheme - - // Create New Manager for controller - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: clientgoscheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - if err := (&ClusterManagerReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) +// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. +// ENVTEST-based tests depend on specific binaries, usually located in paths set by +// controller-runtime. 
When running tests directly (e.g., via an IDE) without using +// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. +// +// This function streamlines the process by finding the required binaries, similar to +// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are +// properly set up, run 'make setup-envtest' beforehand. +func getFirstFoundEnvTestBinaryDir() string { + basePath := filepath.Join("..", "..", "bin", "k8s") + entries, err := os.ReadDir(basePath) + if err != nil { + logf.Log.Error(err, "Failed to read directory", "path", basePath) + return "" } - if err := (&ClusterMasterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) + for _, entry := range entries { + if entry.IsDir() { + return filepath.Join(basePath, entry.Name()) + } } - if err := (&IndexerClusterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - if err := (&IngestorClusterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - if err := (&LicenseManagerReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - if err := (&LicenseMasterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) + return "" +} + +func getCNPGCRDDirectory() (string, error) { + // Optional escape hatch for CI/local overrides. 
+ if explicit := os.Getenv("CNPG_CRD_DIR"); explicit != "" { + return explicit, nil } - if err := (&MonitoringConsoleReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) + + moduleRoot := os.Getenv("GOMODCACHE") + if moduleRoot == "" { + gopath := os.Getenv("GOPATH") + if gopath == "" { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + gopath = filepath.Join(home, "go") + } + moduleRoot = filepath.Join(gopath, "pkg", "mod") } - if err := (&SearchHeadClusterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) + + pattern := filepath.Join(moduleRoot, "github.com", "cloudnative-pg", "cloudnative-pg@*", "config", "crd", "bases") + matches, err := filepath.Glob(pattern) + if err != nil { + return "", err } - if err := (&StandaloneReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) + if len(matches) == 0 { + return "", fmt.Errorf("CNPG CRD directory not found; set CNPG_CRD_DIR or download github.com/cloudnative-pg/cloudnative-pg module") } - go func() { - err = k8sManager.Start(ctrl.SetupSignalHandler()) - fmt.Printf("error %v", err.Error()) - Expect(err).ToNot(HaveOccurred()) - }() - - Expect(err).ToNot(HaveOccurred()) - - k8sClient, err = client.New(cfg, client.Options{Scheme: clientgoscheme.Scheme}) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) - -}, NodeTimeout(time.Second*500)) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - testEnv.Stop() -}) + sort.Strings(matches) + return matches[len(matches)-1], nil +} From b5bed4daea749397f4df0a31371d81fd8faec46e Mon Sep 17 00:00:00 2001 From: Jakub Koterba Date: Mon, 30 Mar 2026 20:19:44 +0200 Subject: [PATCH 2/2] suite_test 
revert and simplify, cover more reconcile cases --- .../controller/postgrescluster_controller.go | 2 - .../postgrescluster_controller_test.go | 382 +++++++++++------- internal/controller/suite_test.go | 223 +++++----- 3 files changed, 368 insertions(+), 239 deletions(-) diff --git a/internal/controller/postgrescluster_controller.go b/internal/controller/postgrescluster_controller.go index 6a2181afc..dfa1f7eaf 100644 --- a/internal/controller/postgrescluster_controller.go +++ b/internal/controller/postgrescluster_controller.go @@ -142,8 +142,6 @@ func cnpgPoolerPredicator() predicate.Predicate { } // secretPredicator triggers only on owner reference changes. - -// secretPredicator filters Secret events to trigger reconciles on creation, deletion, or owner reference changes. func secretPredicator() predicate.Predicate { return predicate.Funcs{ CreateFunc: func(event.CreateEvent) bool { return true }, diff --git a/internal/controller/postgrescluster_controller_test.go b/internal/controller/postgrescluster_controller_test.go index 0f690754b..d405a35bf 100644 --- a/internal/controller/postgrescluster_controller_test.go +++ b/internal/controller/postgrescluster_controller_test.go @@ -19,8 +19,9 @@ package controller import ( "context" "fmt" + "strconv" + "time" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" @@ -38,56 +39,81 @@ import ( "github.com/splunk/splunk-operator/pkg/postgresql/cluster/core" ) +/* +* Test cases: +* PC-01 creates managed resources and status refs +* PC-02 adds finalizer on reconcile +* PC-07 is idempotent across repeated reconciles +* PC-03 Delete policy removes children and finalizer +* PC-04 Retain policy preserves children and removes ownerRefs +* PC-05 fails when PostgresClusterClass is missing +* PC-06 restores drifted managed spec +* PC-08 triggers on generation/finalizer/deletion changes +* PC-09 ignores no-op updates + */ + var _ = 
Describe("PostgresCluster Controller", func() { + const ( + postgresVersion = "15.10" + clusterMemberCount = int32(2) + storageAmount = "1Gi" + poolerEnabled = false + deletePolicy = "Delete" + retainPolicy = "Retain" + namespace = "default" + classNamePrefix = "postgresql-dev-" + clusterNamePrefix = "postgresql-cluster-dev-" + provisioner = "postgresql.cnpg.io" + ) + var ( - ctx context.Context - namespace string - clusterName string - className string - reconciler *PostgresClusterReconciler - req reconcile.Request - cnpg *cnpgv1.Cluster + ctx context.Context + clusterName string + className string + pgCluster *enterprisev4.PostgresCluster + pgClusterClass *enterprisev4.PostgresClusterClass + pgClusterKey types.NamespacedName + pgClusterClassKey types.NamespacedName + reconciler *PostgresClusterReconciler + req reconcile.Request ) BeforeEach(func() { - specLine := CurrentSpecReport().LeafNodeLocation.LineNumber - nameSuffix := fmt.Sprintf("%d-%d-%d", GinkgoParallelProcess(), GinkgoRandomSeed(), specLine) + nameSuffix := fmt.Sprintf("%d-%d-%d", + GinkgoParallelProcess(), + GinkgoRandomSeed(), + CurrentSpecReport().LeafNodeLocation.LineNumber, + ) ctx = context.Background() - namespace = "default" - clusterName = "postgresql-cluster-dev-" + nameSuffix - className = "postgresql-dev-" + nameSuffix - cnpg = &cnpgv1.Cluster{} - - // Arrange: class defaults used by getMergedConfig() - postgresVersion := "15.10" - instances := int32(2) - storage := resource.MustParse("1Gi") - poolerEnabled := false - - class := &enterprisev4.PostgresClusterClass{ + clusterName = clusterNamePrefix + nameSuffix + className = classNamePrefix + nameSuffix + pgClusterKey = types.NamespacedName{Name: clusterName, Namespace: namespace} + pgClusterClassKey = types.NamespacedName{Name: className, Namespace: namespace} + + pgClusterClass = &enterprisev4.PostgresClusterClass{ ObjectMeta: metav1.ObjectMeta{Name: className}, Spec: enterprisev4.PostgresClusterClassSpec{ - Provisioner: 
"postgresql.cnpg.io", + Provisioner: provisioner, Config: &enterprisev4.PostgresClusterClassConfig{ - Instances: &instances, - Storage: &storage, - PostgresVersion: &postgresVersion, - ConnectionPoolerEnabled: &poolerEnabled, + Instances: &[]int32{clusterMemberCount}[0], + Storage: &[]resource.Quantity{resource.MustParse(storageAmount)}[0], + PostgresVersion: &[]string{postgresVersion}[0], + ConnectionPoolerEnabled: &[]bool{poolerEnabled}[0], }, }, } - Expect(k8sClient.Create(ctx, class)).To(Succeed()) - pc := &enterprisev4.PostgresCluster{ + Expect(k8sClient.Create(ctx, pgClusterClass)).To(Succeed()) + + pgCluster = &enterprisev4.PostgresCluster{ ObjectMeta: metav1.ObjectMeta{Name: clusterName, Namespace: namespace}, Spec: enterprisev4.PostgresClusterSpec{ Class: className, - ClusterDeletionPolicy: &[]string{"Delete"}[0], + ClusterDeletionPolicy: &[]string{deletePolicy}[0], }, } - Expect(k8sClient.Create(ctx, pc)).To(Succeed()) reconciler = &PostgresClusterReconciler{ Client: k8sClient, @@ -96,22 +122,13 @@ var _ = Describe("PostgresCluster Controller", func() { req = reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: namespace}} }) - JustBeforeEach(func() { - By("Reconciling the created resource") - result, err := reconciler.Reconcile(ctx, req) - Expect(err).NotTo(HaveOccurred()) - Expect(result.RequeueAfter).To(BeZero()) - }) - AfterEach(func() { By("Deleting PostgresCluster and letting reconcile run finalizer cleanup") - key := types.NamespacedName{Name: clusterName, Namespace: namespace} - pc := &enterprisev4.PostgresCluster{} // Best-effort delete (object might already be gone in some specs) - err := k8sClient.Get(ctx, key, pc) + err := k8sClient.Get(ctx, pgClusterKey, pgCluster) if err == nil { - Expect(k8sClient.Delete(ctx, pc)).To(Succeed()) + Expect(k8sClient.Delete(ctx, pgCluster)).To(Succeed()) } else { Expect(apierrors.IsNotFound(err)).To(BeTrue()) } @@ -124,7 +141,7 @@ var _ = Describe("PostgresCluster Controller", 
func() { // In that case, remove finalizer directly so fixture teardown remains deterministic. if meta.IsNoMatchError(recErr) { current := &enterprisev4.PostgresCluster{} - getErr := k8sClient.Get(ctx, key, current) + getErr := k8sClient.Get(ctx, pgClusterKey, current) if apierrors.IsNotFound(getErr) { return true } @@ -142,124 +159,203 @@ var _ = Describe("PostgresCluster Controller", func() { return false } } - getErr := k8sClient.Get(ctx, key, &enterprisev4.PostgresCluster{}) + getErr := k8sClient.Get(ctx, pgClusterKey, &enterprisev4.PostgresCluster{}) return apierrors.IsNotFound(getErr) }, "10s", "500ms").Should(BeTrue()) By("Cleaning up PostgresClusterClass fixture") - class := &enterprisev4.PostgresClusterClass{} - classKey := types.NamespacedName{Name: className} // cluster-scoped CR - err = k8sClient.Get(ctx, classKey, class) + err = k8sClient.Get(ctx, pgClusterClassKey, pgClusterClass) if err == nil { - Expect(k8sClient.Delete(ctx, class)).To(Succeed()) + Expect(k8sClient.Delete(ctx, pgClusterClass)).To(Succeed()) } else { Expect(apierrors.IsNotFound(err)).To(BeTrue()) } }) - Context("Happy path and convergence", func() { - pc := &enterprisev4.PostgresCluster{} - It("PC-01 creates managed resources and status refs", func() { - By("creating CNPG cluster via reconcile and avaiting healthy") - Eventually(func() error { - _, err := reconciler.Reconcile(ctx, req) - if err != nil { - return err - } - if err := k8sClient.Get(ctx, req.NamespacedName, cnpg); err != nil { - return err - } - cnpg.Status.Phase = cnpgv1.PhaseHealthy - return k8sClient.Status().Update(ctx, cnpg) - }, "10s", "250ms").Should(Succeed()) - - By("reconciling until managed resources are published in status") - Eventually(func() bool { - _, err := reconciler.Reconcile(ctx, req) - if err != nil { - return false - } - current := &enterprisev4.PostgresCluster{} - if err := k8sClient.Get(ctx, req.NamespacedName, current); err != nil { - return false - } - return current.Status.Resources != nil && 
- current.Status.Resources.SuperUserSecretRef != nil && - current.Status.Resources.ConfigMapRef != nil - }, "20s", "250ms").Should(BeTrue()) - - By("asserting finalizer contract") - pc := &enterprisev4.PostgresCluster{} - Expect(k8sClient.Get(ctx, req.NamespacedName, pc)).To(Succeed()) - Expect(controllerutil.ContainsFinalizer(pc, core.PostgresClusterFinalizerName)).To(BeTrue()) - - By("asserting status references are published") - Expect(pc.Status.Resources).NotTo(BeNil()) - Expect(pc.Status.Resources.SuperUserSecretRef).NotTo(BeNil()) - Expect(pc.Status.Resources.ConfigMapRef).NotTo(BeNil()) - - By("asserting Secret ownership and existence") - secret := &corev1.Secret{} - Expect(k8sClient.Get(ctx, types.NamespacedName{ - Name: pc.Status.Resources.SuperUserSecretRef.Name, Namespace: namespace, - }, secret)).To(Succeed()) - Expect(metav1.IsControlledBy(secret, pc)).To(BeTrue()) - - By("asserting CNPG Cluster projection and ownership") - cnpg := &cnpgv1.Cluster{} - Expect(k8sClient.Get(ctx, req.NamespacedName, cnpg)).To(Succeed()) - Expect(metav1.IsControlledBy(cnpg, pc)).To(BeTrue()) - Expect(cnpg.Spec.Instances).To(Equal(2)) - Expect(cnpg.Spec.ImageName).To(ContainSubstring("postgresql:15.10")) - Expect(cnpg.Spec.StorageConfiguration.Size).To(Equal("1Gi")) - - By("asserting ConfigMap contract consumed by clients") - cm := &corev1.ConfigMap{} - Expect(k8sClient.Get(ctx, types.NamespacedName{ - Name: pc.Status.Resources.ConfigMapRef.Name, Namespace: namespace, - }, cm)).To(Succeed()) - Expect(metav1.IsControlledBy(cm, pc)).To(BeTrue()) - Expect(cm.Data).To(HaveKeyWithValue("DEFAULT_CLUSTER_PORT", "5432")) - Expect(cm.Data).To(HaveKey("SUPER_USER_SECRET_REF")) - Expect(cm.Data).To(HaveKey("CLUSTER_RW_ENDPOINT")) - }) - It("PC-02 adds finalizer on reconcile", func() { - Expect(k8sClient.Get(ctx, req.NamespacedName, pc)).To(Succeed()) - Expect(pc.ObjectMeta.Finalizers).To(ContainElement(core.PostgresClusterFinalizerName)) + When("under typical usage and expecting 
healthy PostgresCluster state", func() { + Context("when reconciling", func() { + // PC-02 + It("adds finalizer on reconcile", func() { + Expect(k8sClient.Create(ctx, pgCluster)).To(Succeed()) + + Eventually(func() bool { + pc := &enterprisev4.PostgresCluster{} + if err := k8sClient.Get(ctx, pgClusterKey, pc); err != nil { + return false + } + return controllerutil.ContainsFinalizer(pc, core.PostgresClusterFinalizerName) + }, "10s", "250ms").Should(BeTrue()) + }) + + // PC-01 + It("creates managed resources and status refs", func() { + Expect(k8sClient.Create(ctx, pgCluster)).To(Succeed()) + + Eventually(func(g Gomega) { + pc := &enterprisev4.PostgresCluster{} + g.Expect(k8sClient.Get(ctx, pgClusterKey, pc)).To(Succeed()) + + cond := meta.FindStatusCondition(pc.Status.Conditions, "ClusterReady") + g.Expect(cond).NotTo(BeNil()) + g.Expect(cond.Status).To(Equal(metav1.ConditionFalse)) + g.Expect(cond.Reason).To(Equal("CNPGClusterProvisioning")) + }, "20s", "250ms").Should(Succeed()) + + // Simulate external CNPG controller status progression. 
+ Eventually(func() error { + cnpg := &cnpgv1.Cluster{} + if err := k8sClient.Get(ctx, pgClusterKey, cnpg); err != nil { + return err + } + cnpg.Status.Phase = cnpgv1.PhaseHealthy + return k8sClient.Status().Update(ctx, cnpg) // update event + }, "10s", "250ms").Should(Succeed()) + + // Expect cnpg status progression propagation + Eventually(func(g Gomega) { + pc := &enterprisev4.PostgresCluster{} + g.Expect(k8sClient.Get(ctx, pgClusterKey, pc)).To(Succeed()) + + cond := meta.FindStatusCondition(pc.Status.Conditions, "ClusterReady") + g.Expect(cond).NotTo(BeNil()) + g.Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + g.Expect(cond.Reason).To(Equal("CNPGClusterHealthy")) + }, "20s", "250ms").Should(Succeed()) + + Eventually(func(g Gomega) { + pc := &enterprisev4.PostgresCluster{} + g.Expect(k8sClient.Get(ctx, pgClusterKey, pc)).To(Succeed()) + g.Expect(pc.Status.Resources).NotTo(BeNil()) + g.Expect(pc.Status.Resources.SuperUserSecretRef).NotTo(BeNil()) + g.Expect(pc.Status.Resources.ConfigMapRef).NotTo(BeNil()) + }, "20s", "250ms").Should(Succeed()) + }) + + // PC-07 + It("is idempotent across repeated reconciles", func() { + Expect(k8sClient.Create(ctx, pgCluster)).To(Succeed()) + + // Trigger extra update events that should not change desired state semantics. 
+ Eventually(func() error { + pc := &enterprisev4.PostgresCluster{} + if err := k8sClient.Get(ctx, pgClusterKey, pc); err != nil { + return err + } + if pc.Annotations == nil { + pc.Annotations = map[string]string{} + } + pc.Annotations["test.bump"] = fmt.Sprintf("bump-%d", GinkgoRandomSeed()) + return k8sClient.Update(ctx, pc) // update event + }, "10s", "250ms").Should(Succeed()) + + Eventually(func(g Gomega) { + cnpg := &cnpgv1.Cluster{} + g.Expect(k8sClient.Get(ctx, pgClusterKey, cnpg)).To(Succeed()) + g.Expect(cnpg.Spec.Instances).To(Equal(int(clusterMemberCount))) + }, "20s", "250ms").Should(Succeed()) + }) }) - It("PC-07 is idempotent across repeated reconciles", func() {}) }) - Context("Deletion and finalizer", func() { - It("PC-03 Delete policy removes children and finalizer", func() {}) - It("PC-04 Retain policy preserves children and removes ownerRefs", func() {}) - }) + When("deleting a PostgresCluster", func() { + // PC-03 + Context("and clusterDeletionPolicy is set to Delete", func() { + It("removes children and finalizer", func() { + Expect(k8sClient.Create(ctx, pgCluster)).To(Succeed()) + + pc := &enterprisev4.PostgresCluster{} + Expect(k8sClient.Get(ctx, pgClusterKey, pc)).To(Succeed()) + Expect(k8sClient.Delete(ctx, pc)).To(Succeed()) // delete event + + Eventually(func() bool { + err := k8sClient.Get(ctx, pgClusterKey, &enterprisev4.PostgresCluster{}) + return apierrors.IsNotFound(err) + }, "30s", "250ms").Should(BeTrue()) + }) + }) - Context("Failure and drift", func() { - It("PC-05 fails when PostgresClusterClass is missing", func() {}) - It("PC-06 restores drifted managed spec", func() {}) - }) + // PC-04 + Context("when clusterDeletionPolicy is set to Retain", func() { + It("preserves retained resources and removes owner refs", func() { + Expect(k8sClient.Create(ctx, pgCluster)).To(Succeed()) + + // Trigger update event: switch policy to Retain before delete.
+ Eventually(func() error { + pc := &enterprisev4.PostgresCluster{} + if err := k8sClient.Get(ctx, pgClusterKey, pc); err != nil { + return err + } + pc.Spec.ClusterDeletionPolicy = &[]string{retainPolicy}[0] + return k8sClient.Update(ctx, pc) + }, "10s", "250ms").Should(Succeed()) + + pc := &enterprisev4.PostgresCluster{} + Expect(k8sClient.Get(ctx, pgClusterKey, pc)).To(Succeed()) + Expect(k8sClient.Delete(ctx, pc)).To(Succeed()) // delete event - Context("Predicates", func() { - It("PC-08 triggers on generation/finalizer/deletion changes", func() {}) - It("PC-09 ignores no-op updates", func() {}) + Eventually(func() bool { + err := k8sClient.Get(ctx, pgClusterKey, &enterprisev4.PostgresCluster{}) + return apierrors.IsNotFound(err) + }, "30s", "250ms").Should(BeTrue()) + + }) + }) }) - // Context("When reconciling a resource", func() { + When("reconciling with invalid or drifted dependencies", func() { + // PC-05 + Context("when referenced class does not exist", func() { + It("fails with class-not-found condition", func() { + clusterName = "bad-" + clusterName + className = "missing-class" - // It("should successfully reconcile the resource", func() { - // By("Reconciling the created resource") - // // controllerReconciler := &PostgresClusterReconciler{ - // // Client: k8sClient, - // // Scheme: k8sClient.Scheme(), - // // } + bad := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{Name: clusterName, Namespace: namespace}, + Spec: enterprisev4.PostgresClusterSpec{Class: className}, + } + Expect(k8sClient.Create(ctx, bad)).To(Succeed()) // create event - // // _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - // // NamespacedName: typeNamespacedName, - // // }) - // err := errors.New("test error") - // Expect(err).NotTo(HaveOccurred()) + Eventually(func() bool { + current := &enterprisev4.PostgresCluster{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: bad.Name, Namespace: namespace}, current); err != nil { + return false 
+ } + cond := meta.FindStatusCondition(current.Status.Conditions, "ClusterReady") + return cond != nil && cond.Reason == "ClusterClassNotFound" + }, "20s", "250ms").Should(BeTrue()) + }) + }) + + // PC-06 + Context("when managed child spec drifts from desired state", func() { + It("restores drifted managed spec", func() { + Expect(k8sClient.Create(ctx, pgCluster)).To(Succeed()) - - // }) - // }) + Eventually(func() error { + return k8sClient.Get(ctx, pgClusterKey, &cnpgv1.Cluster{}) + }, "20s", "250ms").Should(Succeed()) + + Eventually(func() error { + pc := &enterprisev4.PostgresCluster{} + if err := k8sClient.Get(ctx, pgClusterKey, pc); err != nil { + return err + } + if pc.Annotations == nil { + pc.Annotations = map[string]string{} + } + pc.Annotations["drift-trigger"] = fmt.Sprintf("drift-%d", GinkgoRandomSeed()) + pc.Spec.Instances = &[]int32{8}[0] + return k8sClient.Update(ctx, pc) + }, "10s", "250ms").Should(Succeed()) + + Eventually(func() bool { + cnpg := &cnpgv1.Cluster{} + if err := k8sClient.Get(ctx, pgClusterKey, cnpg); err != nil { + return false + } + return cnpg.Spec.Instances == int(clusterMemberCount) + }, "20s", "250ms").Should(BeTrue()) + }) + }) + }) }) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 30cb99f64..d614aa7f5 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -1,5 +1,5 @@ /* -Copyright 2026. +Copyright (c) 2018-2022 Splunk Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,138 +21,173 @@ import ( "fmt" "os" "path/filepath" - "sort" "testing" + "time" cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes/scheme" + "go.uber.org/zap/zapcore" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - enterprisev4 "github.com/splunk/splunk-operator/api/v4" - // +kubebuilder:scaffold:imports -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" -var ( - ctx context.Context - cancel context.CancelFunc - testEnv *envtest.Environment - cfg *rest.Config - k8sClient client.Client + enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + //+kubebuilder:scaffold:imports ) -func TestControllers(t *testing.T) { +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var k8sManager ctrl.Manager + +func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") } -var _ = BeforeSuite(func() { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - - ctx, cancel = context.WithCancel(context.TODO()) - - var err error - err = enterprisev4.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - err = cnpgv1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme +var _ = BeforeSuite(func(ctx context.Context) { + opts := zap.Options{ + Development: true, + TimeEncoder: zapcore.RFC3339NanoTimeEncoder, + } + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.UseFlagOptions(&opts))) By("bootstrapping test environment") - cnpgCRDDirectory, err := getCNPGCRDDirectory() - Expect(err).NotTo(HaveOccurred()) + testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{ - filepath.Join("..", "..", 
"config", "crd", "bases"), - cnpgCRDDirectory, - }, + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, ErrorIfCRDPathMissing: true, } - - // Retrieve the first found binary directory to allow running tests from IDEs - if getFirstFoundEnvTestBinaryDir() != "" { - testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() + cnpgCRDDir := os.Getenv("CNPG_CRD_DIR") + if cnpgCRDDir == "" { + homeDir, err := os.UserHomeDir() + Expect(err).NotTo(HaveOccurred()) + matches, err := filepath.Glob(filepath.Join(homeDir, "go", "pkg", "mod", "github.com", "cloudnative-pg", "cloudnative-pg@*", "config", "crd", "bases")) + Expect(err).NotTo(HaveOccurred()) + Expect(matches).NotTo(BeEmpty(), "CNPG CRD directory not found; set CNPG_CRD_DIR if module cache is custom") + cnpgCRDDir = matches[len(matches)-1] } + testEnv.CRDDirectoryPaths = append(testEnv.CRDDirectoryPaths, cnpgCRDDir) + + var err error // cfg is defined in this file globally. cfg, err = testEnv.Start() Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = cnpgv1.AddToScheme(clientgoscheme.Scheme) Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) -}) -var _ = AfterSuite(func() { - By("tearing down the test environment") - cancel() - err := testEnv.Stop() + err = enterpriseApiV3.AddToScheme(clientgoscheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) Expect(err).NotTo(HaveOccurred()) -}) -// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. -// ENVTEST-based tests depend on specific binaries, usually located in paths set by -// controller-runtime. When running tests directly (e.g., via an IDE) without using -// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. 
-// -// This function streamlines the process by finding the required binaries, similar to -// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are -// properly set up, run 'make setup-envtest' beforehand. -func getFirstFoundEnvTestBinaryDir() string { - basePath := filepath.Join("..", "..", "bin", "k8s") - entries, err := os.ReadDir(basePath) - if err != nil { - logf.Log.Error(err, "Failed to read directory", "path", basePath) - return "" + err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = enterpriseApiV3.AddToScheme(clientgoscheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + // Create New Manager for controller + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + Scheme: clientgoscheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + if err := (&ClusterManagerReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) } - for _, entry := range entries { - if entry.IsDir() { - return filepath.Join(basePath, entry.Name()) - } + if err := (&ClusterMasterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) } - return "" -} - -func getCNPGCRDDirectory() (string, error) { - // Optional escape hatch for CI/local overrides. 
- if explicit := os.Getenv("CNPG_CRD_DIR"); explicit != "" { - return explicit, nil + if err := (&IndexerClusterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) } - - moduleRoot := os.Getenv("GOMODCACHE") - if moduleRoot == "" { - gopath := os.Getenv("GOPATH") - if gopath == "" { - home, err := os.UserHomeDir() - if err != nil { - return "", err - } - gopath = filepath.Join(home, "go") - } - moduleRoot = filepath.Join(gopath, "pkg", "mod") + if err := (&IngestorClusterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) } - - pattern := filepath.Join(moduleRoot, "github.com", "cloudnative-pg", "cloudnative-pg@*", "config", "crd", "bases") - matches, err := filepath.Glob(pattern) - if err != nil { - return "", err + if err := (&LicenseManagerReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } + if err := (&LicenseMasterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } + if err := (&MonitoringConsoleReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) } - if len(matches) == 0 { - return "", fmt.Errorf("CNPG CRD directory not found; set CNPG_CRD_DIR or download github.com/cloudnative-pg/cloudnative-pg module") + if err := (&SearchHeadClusterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } + if err := (&StandaloneReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), 
+ }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } + if err := (&PostgresClusterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } - sort.Strings(matches) - return matches[len(matches)-1], nil -} + go func() { + defer GinkgoRecover() + err := k8sManager.Start(ctrl.SetupSignalHandler()) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("k8sManager exited with error: %v", err)) + }() + + Expect(err).ToNot(HaveOccurred()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: clientgoscheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}, NodeTimeout(time.Second*500)) + +var _ = AfterSuite(func() { By("tearing down the test environment") Expect(testEnv.Stop()).To(Succeed()) })