From 595054f4be3bedcc846ab253a44e80a5670824c0 Mon Sep 17 00:00:00 2001 From: elbe0116 Date: Wed, 15 Apr 2026 17:47:49 +0500 Subject: [PATCH] feat(integration-tests): add ATP Storage S3 integration Wire Helm values and CR fields for ATP storage, secrets, and test pod env. Document ATP parameters in README. --- README.md | 19 +++ docs/public/architecture.md | 3 + docs/public/features/query-exporter.md | 6 +- docs/public/installation.md | 120 +++++++------- operator/api/apps/v1/postgresservice_types.go | 16 ++ operator/api/apps/v1/zz_generated.deepcopy.go | 40 +++++ .../api/common/v1/zz_generated.deepcopy.go | 2 + operator/api/patroni/v1/patronicore_types.go | 16 ++ .../api/patroni/v1/zz_generated.deepcopy.go | 40 +++++ .../crds/netcracker.com_patronicores.yaml | 25 ++- .../charts/patroni-core/templates/cr.yaml | 24 +++ .../templates/tests-atp-storage-secret.yaml | 12 ++ operator/charts/patroni-core/values.yaml | 24 +++ .../crds/netcracker.com_patroniservices.yaml | 25 ++- .../charts/patroni-services/templates/cr.yaml | 26 +++ .../templates/tests-atp-storage-secret.yaml | 12 ++ operator/charts/patroni-services/values.yaml | 26 +++ operator/pkg/deployment/tests.go | 150 +++++++++++++++++- services/backup-daemon/Dockerfile | 1 + services/upgrade/docker/start.sh | 34 ++-- tests/Dockerfile | 23 ++- tests/docker/pgskipper-robot-entrypoint.sh | 6 + tests/docker/requirements.txt | 13 +- 23 files changed, 573 insertions(+), 90 deletions(-) create mode 100644 docs/public/architecture.md create mode 100644 operator/charts/patroni-core/templates/tests-atp-storage-secret.yaml create mode 100644 operator/charts/patroni-services/templates/tests-atp-storage-secret.yaml create mode 100755 tests/docker/pgskipper-robot-entrypoint.sh diff --git a/README.md b/README.md index f23b9a3c..ee8947cd 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,25 @@ Postgres-Operator provides PostgreSQL as a service on Kubernetes and OpenShift. 
Please refer to the [Quick Start Guide](/docs/public/quickstart.md) +### Integration tests and ATP storage + +Integration test settings live under `tests` in the Helm values for **patroni-core** and **patroni-services** (see [`operator/charts/patroni-core/values.yaml`](operator/charts/patroni-core/values.yaml) and [`operator/charts/patroni-services/values.yaml`](operator/charts/patroni-services/values.yaml)). The test image is based on [qubership-docker-integration-tests](https://github.com/Netcracker/qubership-docker-integration-tests). + +ATP-related Helm values are `atpReport` (with nested `atpReport.atpStorage`), `atpReportViewUiUrl`, and `environmentName`. The chart maps them into the Custom Resource and the operator sets the usual `ATP_*` and `ENVIRONMENT_NAME` environment variables on the integration test pod. + +| Value (Helm) | Description | +|------------------------------------|-------------| +| `atpReport.enabled` | Opt-in for ATP report upload; when `false`, S3-related env vars are not applied, consistent with other product charts. | +| `atpReport.atpStorage.provider` | S3 provider (for example `aws`, `minio`, `s3`). | +| `atpReport.atpStorage.serverUrl` | S3 API endpoint URL. | +| `atpReport.atpStorage.serverUiUrl` | Optional storage UI URL. | +| `atpReport.atpStorage.bucket` | Bucket name; empty usually means no S3 upload in the base image flow. | +| `atpReport.atpStorage.region` | Region (for example for AWS). | +| `atpReport.atpStorage.username` | Access key (sensitive; stored in a Kubernetes Secret when `atpReport.enabled=true`). | +| `atpReport.atpStorage.password` | Secret key (sensitive; stored in a Kubernetes Secret when `atpReport.enabled=true`). | +| `atpReportViewUiUrl` | Optional Allure report UI base URL. | +| `environmentName` | Optional logical name for paths or labels. | + ### Smoke tests There are no smoke tests. 
diff --git a/docs/public/architecture.md b/docs/public/architecture.md new file mode 100644 index 00000000..94e4075f --- /dev/null +++ b/docs/public/architecture.md @@ -0,0 +1,3 @@ +# Architecture + +## Postgres operator diff --git a/docs/public/features/query-exporter.md b/docs/public/features/query-exporter.md index 05242317..b2d8834e 100644 --- a/docs/public/features/query-exporter.md +++ b/docs/public/features/query-exporter.md @@ -29,7 +29,7 @@ However for managed databases these extensions must be enabled for database inst ## Migration from postgres-exporter -Please check [new queries format](/charts/patroni-services/query-exporter/query-exporter-queries.yaml) for query-exporter. +Please check [new queries format](/operator/charts/patroni-services/query-exporter/query-exporter-queries.yaml) for query-exporter. For custom queries two sections must be used in config: `metrics` and `queries`. Queries section includes map of queries. Each query now include next mandatory fields: @@ -174,7 +174,7 @@ In this case query `pg_example` will be executed for all databases matching at l In postgres-operator new watchers are created for namespaces, listed in deployment parameters. These watchers react to Create, Update, Delete events for config maps with labels from `queryExporter.customQueries.labels` parameter and mandatory label -```query-exporter: custom-queries```. Config maps should contain metrics with custom queries for Query Exporter. Metrics must correspond to the [query exporter format](/charts/patroni-services/query-exporter/query-exporter-queries.yaml) and must meet [metric naming rules](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels)). +```query-exporter: custom-queries```. Config maps should contain metrics with custom queries for Query Exporter. 
Metrics must correspond to the [query exporter format](/operator/charts/patroni-services/query-exporter/query-exporter-queries.yaml) and must meet [metric naming rules](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). After the Create event, changes from created config map will be appended to `query-exporter-queries` config map. After the Modify event, changes from config map will be replaced in `query-exporter-queries` config map. After the Delete event, changes from config map will be deleted from `query-exporter-queries` config map. @@ -202,7 +202,7 @@ queryExporter: - "pg_lock_tree_query" - "connection_by_role_with_limit_query" ``` -Names of the queries can be found in [query-exporter-queries](/charts/patroni-services/query-exporter/query-exporter-queries.yaml) configmap. All metrics for excluded query will be automatically excluded. +Names of the queries can be found in [query-exporter-queries](/operator/charts/patroni-services/query-exporter/query-exporter-queries.yaml) configmap. All metrics for excluded query will be automatically excluded. # Self monitoring diff --git a/docs/public/installation.md b/docs/public/installation.md index 3ab9a7d7..09abae73 100644 --- a/docs/public/installation.md +++ b/docs/public/installation.md @@ -88,13 +88,13 @@ For Openshift version 4.8 and above OpenShift Container Platform ships with a de Follow these steps to enable the default seccomp profile for all pods: -1) Export the available restricted SCC to a yaml file: +1) Export the available restricted SCC to a YAML file: ``` $ oc get scc restricted -o yaml > restricted-seccomp.yaml ``` -2) Edit the created restricted SCC yaml file: +2) Edit the created restricted SCC YAML file: ``` $ vi restricted-seccomp.yaml @@ -206,8 +206,8 @@ The general parameters used for the configurations are specified below. | serviceAccount.create | bool | no | true | Specifies whether a service account needs to be created. 
| | serviceAccount.name | string | no | postgres-sa | Specifies name of the Service Account under which Postgres Operator will work. | | runTestsOnly | bool | no | false | Indicates whether to run Integration Tests (skipping deploy step) only or not. | -| affinity | json | no | n/a | Defines affinity scheduling rules for all components. Can be overridden per component. | -| podLabels | yaml | no | n/a | Specifies custom pod labels for all the components. Can be overridden per component. | +| affinity | JSON | no | n/a | Defines affinity scheduling rules for all components. Can be overridden per component. | +| podLabels | YAML | no | n/a | Specifies custom pod labels for all the components. Can be overridden per component. | **Note**: `postgresUser` is not the user which will be created during deployment. You should mention here the user which is already present with superuser role. If you need to use some other user instead of postgres, you should create the desired user manually with superuser role. @@ -218,11 +218,11 @@ This sections describes all possible deploy parameters for PostgreSQL Operator. | Parameter | Type | Mandatory | Default value | Description | |-------------------------------------------------|--------|-----------|---------------|----------------------------------------------------------------------------------------| | operator.resources.requests.memory | string | no | 50Mi | Specifies memory requests for Postgres Operator. | -| operator.resources.requests.cpu | string | no | 50m | Specifies cpu requests for Postgres Operator. | +| operator.resources.requests.cpu | string | no | 50m | Specifies CPU requests for Postgres Operator. | | operator.resources.limits.memory | string | no | 50Mi | Specifies memory limits for Postgres Operator. | -| operator.resources.limits.cpu | string | no | 50m | Specifies cpu limits for Postgres Operator. | -| operator.affinity | json | no | n/a | Specifies the affinity scheduling rules. 
| -| operator.podLabels | yaml | no | n/a | Specifies custom pod labels for Postgres Operator. | +| operator.resources.limits.cpu | string | no | 50m | Specifies CPU limits for Postgres Operator. | +| operator.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | +| operator.podLabels | YAML | no | n/a | Specifies custom pod labels for Postgres Operator. | | operator.waitTimeout | string | no | 10 | Specifies the timeouts in minutes for Postgres Operator to wait for successful checks. | | operator.reconcileRetries | string | no | 3 | Specifies the number of retries in single reconcile loop for Postgres Operator. | @@ -235,15 +235,15 @@ This sections describes all possible deploy parameters for Patroni component. | patroni.install | bool | no | true | Indicates whether to install Patroni component or not. Should be set to `no` in case of Managed DBs. | | patroni.clusterName | string | no | patroni | Specifies Patroni cluster name.. | | patroni.resources.requests.memory | string | no | 250Mi | Specifies memory requests. | -| patroni.resources.requests.cpu | string | no | 125m | Specifies cpu requests. | +| patroni.resources.requests.cpu | string | no | 125m | Specifies CPU requests. | | patroni.resources.limits.memory | string | no | 500Mi | Specifies memory limits. | -| patroni.resources.limits.cpu | string | no | 250m | Specifies cpu limits. | +| patroni.resources.limits.cpu | string | no | 250m | Specifies CPU limits. | | patroni.resources.unlimited | bool | no | false | Specifies if we should skip setting limits for Patroni. | | patroni.postgreSQLParams | []string | no | [Default PostgreSQL parameters](#default-postgresql-parameters) | Specifies PostgreSQL parameters. Values should be specified as a string list of `key: value` parameters. | | patroni.patroniParams | []string | no | n/a | Specifies Patroni configuration parameters. Values should be specified as a string list of `key: value` parameters. 
| | patroni.securityContext | [Kubernetes Sec Context](https://pkg.go.dev/k8s.io/api/core/v1#SecurityContext) | no | n/a | Specifies pod level security attributes and common container settings. | -| patroni.standbyCluster.host | string | no | n/a | Specifies host of active Postgresql cluster for Patroni standby cluster configuration. | -| patroni.standbyCluster.port | string | no | n/a | Specifies port of active Postgresql cluster for Patroni standby cluster configuration. | +| patroni.standbyCluster.host | string | no | n/a | Specifies host of active PostgreSQL cluster for Patroni standby cluster configuration. | +| patroni.standbyCluster.port | string | no | n/a | Specifies port of active PostgreSQL cluster for Patroni standby cluster configuration. | | patroni.enableShmVolume | bool | no | true | Specifies should tmpfs mount for /dev/shm be used in Patroni pods. | | patroni.powa.install | bool | no | true | Indicates whether to configure POWA for PostgreSQL or not. | | patroni.powa.password | string | no | Pow@pASsWORD | Specifies password for POWA user. | @@ -259,11 +259,11 @@ This sections describes all possible deploy parameters for Patroni component. | patroni.pgWalStorage | Storage Group | no | n/a | Specifies set of storage parameters for separater volume for `pg_wal` directory. Parameters are the same as for `storage`. | | patroni.pgWalStorageAutoManage | bool | no | n/a | Specifies is pg_wal files have to be moved to separate volume `pg_wal` directory automatically. | | patroni.priorityClassName | string | no | n/a | Specifies [Priority Class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass). | -| patroni.affinity | json | no | n/a | Specifies the affinity scheduling rules. | -| patroni.podLabels | yaml | no | n/a | Specifies custom pod labels. | -| patroni.external.pvc | yaml | no | n/a | Specifies list of pvcs to mount them to patroni pods. 
| -| patroni.external.pvc.name | yaml | no | n/a | Specifies name of pvc to mount it to patroni pods. | -| patroni.external.pvc.mountPath | yaml | no | n/a | Specifies path on patroni pod for mounted pvc. | +| patroni.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | +| patroni.podLabels | YAML | no | n/a | Specifies custom pod labels. | +| patroni.external.pvc | YAML | no | n/a | Specifies list of pvcs to mount them to patroni pods. | +| patroni.external.pvc.name | YAML | no | n/a | Specifies name of pvc to mount it to patroni pods. | +| patroni.external.pvc.mountPath | YAML | no | n/a | Specifies path on patroni pod for mounted pvc. | ## majorUpgrade @@ -303,9 +303,9 @@ Patroni Core Operator allows configuration of TLS for PostgreSQL. By default, re | tls.generateCerts.subjectAlternativeName.additionalDnsNames | []string | no | n/a | Specifies the list of additional DNS names to be added to the "Subject Alternative Name" field of SSL certificate. If access to Postgres Service for external clients is enabled, DNS names from externalHostNames parameter must be specified in here. | | tls.generateCerts.subjectAlternativeName.additionalIpAddresses | []string | no | n/a | Specifies the list of additional IP addresses to be added to the "Subject Alternative Name" field of SSL certificate. If access to Postgres Service for external clients is enabled, IP addresses from externalHostNames parameter must be specified in here. | | tls.generateCerts.clusterIssuerName | string | yes | n/a | Specifies name of `ClusterIssuer` resource. If the parameter is not set or empty, `Issuer` resource in current Kubernetes namespace will be used. | -| tls.certificates.tls_crt | string | no | "" | Specifies the certificate in BASE64 format. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own certificate. | -| tls.certificates.tls_key | string | no | "" | Specifies the private key in BASE64 format. 
It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own key. | -| tls.certificates.ca_crt | string | no | "" | Specifies base 64 encoded CA certificate. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own ca certificate. | +| tls.certificates.tls_crt | string | no | "" | Specifies the certificate in base64 format. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own certificate. | +| tls.certificates.tls_key | string | no | "" | Specifies the private key in base64 format. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own key. | +| tls.certificates.ca_crt | string | no | "" | Specifies base64 encoded CA certificate. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own ca certificate. | ## pgBackRest @@ -347,8 +347,8 @@ The general parameters used for the configurations are specified below. | serviceAccount.create | bool | no | true | Specifies whether a service account needs to be created. | | serviceAccount.name | string | no | postgres-sa | Specifies name of the Service Account under which Postgres Operator will work. | | runTestsOnly | bool | no | false | Indicates whether to run Integration Tests (skipping deploy step) only or not. | -| affinity | json | no | n/a | Defines affinity scheduling rules for all components. Can be overridden per component. | -| podLabels | yaml | no | n/a | Specifies custom pod labels for all the components. Can be overridden per component. | +| affinity | JSON | no | n/a | Defines affinity scheduling rules for all components. Can be overridden per component. | +| podLabels | YAML | no | n/a | Specifies custom pod labels for all the components. Can be overridden per component. 
| **Note**: `postgresUser` is not the user which will be created during deployment. You should mention here the user which is already present with superuser role. If you need to use some other user instead of postgres, you should create the desired user manually with superuser role. @@ -359,11 +359,11 @@ This sections describes all possible deploy parameters for PostgreSQL Operator. | Parameter | Type | Mandatory | Default value | Description | |-------------------------------------------------|--------|-----------|---------------|----------------------------------------------------------------------------------------| | operator.resources.requests.memory | string | no | 50Mi | Specifies memory requests for Postgres Operator. | -| operator.resources.requests.cpu | string | no | 50m | Specifies cpu requests for Postgres Operator. | +| operator.resources.requests.cpu | string | no | 50m | Specifies CPU requests for Postgres Operator. | | operator.resources.limits.memory | string | no | 50Mi | Specifies memory limits for Postgres Operator. | -| operator.resources.limits.cpu | string | no | 50m | Specifies cpu limits for Postgres Operator. | -| operator.affinity | json | no | n/a | Specifies the affinity scheduling rules. | -| operator.podLabels | yaml | no | n/a | Specifies custom pod labels for Postgres Operator. | +| operator.resources.limits.cpu | string | no | 50m | Specifies CPU limits for Postgres Operator. | +| operator.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | +| operator.podLabels | YAML | no | n/a | Specifies custom pod labels for Postgres Operator. | | operator.waitTimeout | string | no | 10 | Specifies the timeouts in minutes for Postgres Operator to wait for successful checks. | | operator.reconcileRetries | string | no | 3 | Specifies the number of retries in single reconcile loop for Postgres Operator. 
| @@ -383,9 +383,9 @@ This sections describes all possible deploy parameters for PostgreSQL Backup Dae |----------------------------------------|---------------------------------------------------------------------------------|-----------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | backupDaemon.install | bool | no | true | Indicates whether to install PostgreSQL Backup Daemon component or not. | | backupDaemon.resources.requests.memory | string | no | 256Mi | Specifies memory requests. | -| backupDaemon.resources.requests.cpu | string | no | 100m | Specifies cpu requests. | +| backupDaemon.resources.requests.cpu | string | no | 100m | Specifies CPU requests. | | backupDaemon.resources.limits.memory | string | no | 512Mi | Specifies memory limits. | -| backupDaemon.resources.limits.cpu | string | no | 250m | Specifies cpu limits. | +| backupDaemon.resources.limits.cpu | string | no | 250m | Specifies CPU limits. | | backupDaemon.securityContext | [Kubernetes Sec Context](https://pkg.go.dev/k8s.io/api/core/v1#SecurityContext) | no | n/a | Specifies pod level security attributes and common container settings. | | backupDaemon.pgHost | string | no | pg-patroni | Specifies PostgreSQL host. | | backupDaemon.walArchiving | bool | no | false | Indicates whether to save WALs files in PostgreSQL Backup Daemon. This setting can cause major disk usage impact, because each postgres WAL file size is 16MB. Also, please, note, that in case of enabled `walArchiving` memory limits for PostgreSQL Backup Daemon should be set as `1 Gib` minimal. 
| @@ -406,7 +406,7 @@ This sections describes all possible deploy parameters for PostgreSQL Backup Dae | backupDaemon.storage.selectors | []string | no | n/a | Specifies list of selector to choose PVCs. | | backupDaemon.storage.volumes | []string | no | n/a | Specifies list of Persistence Volumes that will be used for PVCs. Should be specified only in case of `pv` storageClass. | | backupDaemon.storage.accessMode | []string | no | n/a | Specifies list of [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that will be used for PVCs. | -| backupDaemon.s3Storage.url | string | no | n/a | Specifies url address to S3 storage. | +| backupDaemon.s3Storage.url | string | no | n/a | Specifies URL address to S3 storage. | | backupDaemon.s3Storage.accessKeyId | string | no | n/a | Specifies S3 accessKeyId credential. | | backupDaemon.s3Storage.secretAccessKey | string | no | n/a | Specifies S3 secretAccessKey credential. | | backupDaemon.s3Storage.bucket | string | no | n/a | Specifies name of S3 Bucket. | @@ -417,8 +417,8 @@ This sections describes all possible deploy parameters for PostgreSQL Backup Dae | backupDaemon.externalPv.capacity | string | no | n/a | Specifies capacity of External PV. | | backupDaemon.externalPv.storageClass | string | no | n/a | Specifies StorageClass of External PV. | | backupDaemon.priorityClassName | string | no | n/a | Specifies [Priority Class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass). | -| backupDaemon.affinity | json | no | n/a | Specifies the affinity scheduling rules. | -| backupDaemon.podLabels | yaml | no | n/a | Specifies custom pod labels. | +| backupDaemon.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | +| backupDaemon.podLabels | YAML | no | n/a | Specifies custom pod labels. 
| ## metricCollector @@ -428,14 +428,14 @@ This sections describes all possible deploy parameters for PostgreSQL Metric Col |-------------------------------------------------------------------------------|---------------------------------------------------------------------------------|-----------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | metricCollector.install | bool | no | true | Indicates whether to install PostgreSQL Metric Collector component or not. | | metricCollector.resources.requests.memory | string | no | 128Mi | Specifies memory requests. | -| metricCollector.resources.requests.cpu | string | no | 150m | Specifies cpu requests. | +| metricCollector.resources.requests.cpu | string | no | 150m | Specifies CPU requests. | | metricCollector.resources.limits.memory | string | no | 256Mi | Specifies memory limits. | -| metricCollector.resources.limits.cpu | string | no | 300m | Specifies cpu limits. | +| metricCollector.resources.limits.cpu | string | no | 300m | Specifies CPU limits. | | metricCollector.securityContext | [Kubernetes Sec Context](https://pkg.go.dev/k8s.io/api/core/v1#SecurityContext) | no | n/a | Specifies pod level security attributes and common container settings. | | metricCollector.collectionInterval | int | no | 60 | Specifies interval in seconds to execute Telegraf's plugins. | | metricCollector.scrapeTimeout | int | no | 20 | Specifies timeout in seconds to wait metric be gathered. | | metricCollector.telegrafPluginTimeout | int | no | 60 | Specifies timeout in seconds to execute Telegraf's plugins. | -| metricCollector.userPassword | yaml | no | p@ssWOrD1 | Specifies the password for metric collector user. | +| metricCollector.userPassword | YAML | no | p@ssWOrD1 | Specifies the password for metric collector user. 
| | metricCollector.ocExecTimeout | int | no | 10 | Specifies timeout in seconds to execute `exec` commands. | | metricCollector.devMetricsInterval | int | no | 10 | Specifies interval in minutes to execute Telegraf's plugins for additional metrics. | | metricCollector.devMetricsTimeout | int | no | 10 | Timeout in minutes to execute command for additional metrics. | @@ -453,8 +453,8 @@ This sections describes all possible deploy parameters for PostgreSQL Metric Col | metricCollector.prometheusRules.maxConnectionExceedPercentageThreshold | int | no | 90 | Specifies the value of exceed max_connection percentage threshold. Value can be set from 0 to 100. | | metricCollector.prometheusRules.maxConnectionReachedPercentageThreshold | int | no | 80 | Specifies the value of reached max_connection percentage threshold. Value can be set from 0 to 100. | | metricCollector.priorityClassName | string | no | n/a | Specifies [Priority Class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass). | -| metricCollector.affinity | json | no | n/a | Specifies the affinity scheduling rules. | -| metricCollector.podLabels | yaml | no | n/a | Specifies custom pod labels. | +| metricCollector.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | +| metricCollector.podLabels | YAML | no | n/a | Specifies custom pod labels. | ## dbaas @@ -464,9 +464,9 @@ This sections describes all possible deploy parameters for PostgreSQL DBaaS Adap |---------------------------------------------|-------------------|-----------|-----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| | dbaas.install | bool | no | true | Indicates whether to install PostgreSQL DBaaS Adapter component or not. | | dbaas.resources.requests.memory | string | no | 64Mi | Specifies memory requests. 
| -| dbaas.resources.requests.cpu | string | no | 200m | Specifies cpu requests. | +| dbaas.resources.requests.cpu | string | no | 200m | Specifies CPU requests. | | dbaas.resources.limits.memory | string | no | 64Mi | Specifies memory limits. | -| dbaas.resources.limits.cpu | string | no | 200m | Specifies cpu limits. | +| dbaas.resources.limits.cpu | string | no | 200m | Specifies CPU limits. | | dbaas.pgHost | string | no | pg-patroni. | Specifies PostgreSQL host.. | | dbaas.pgPort | string | no | 5432 | Specifies port for connection to PostgreSQL. | | dbaas.dbName | string | no | postgres | Specifies name of PostgreSQL database to connect by default. | @@ -483,8 +483,8 @@ This sections describes all possible deploy parameters for PostgreSQL DBaaS Adap | dbaas.apiVersion | string | no | v2 | Specifies the version of DBaaS API. | | dbaas.multiUsers | bool | no | true | Specifies if Multi Users functionality is enabled. | | dbaas.priorityClassName | string | no | n/a | Specifies [Priority Class](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass). | -| dbaas.affinity | json | no | n/a | Specifies the affinity scheduling rules. | -| dbaas.podLabels | yaml | no | n/a | Specifies custom pod labels. | +| dbaas.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | +| dbaas.podLabels | YAML | no | n/a | Specifies custom pod labels. | | dbaas.debug | bool | no | false | Specifies if debug logs are enabled. | | dbaas.updateRoles | bool | no | false | Specifies if roles migration process must be performed. | | INTERNAL_TLS_ENABLED | bool | no | false | Specifies if HTTPS should be enabled for DBaaS Adapter Endpoints and specification of certificates in requests to DBaaS Aggregator. | @@ -589,13 +589,13 @@ This sections describes all possible deploy parameters for Query Exporter compon | queryExporter.install | bool | no | false | Indicates that Query Exporter should be installed or not. 
| | queryExporter.securityContext | [Kubernetes Sec Context](https://pkg.go.dev/k8s.io/api/core/v1#SecurityContext) | no | n/a | Specifies pod level security attributes and common container settings. | | queryExporter.resources.requests.memory | string | no | 128Mi | Specifies memory requests. | -| queryExporter.resources.requests.cpu | string | no | 150m | Specifies cpu requests. | +| queryExporter.resources.requests.cpu | string | no | 150m | Specifies CPU requests. | | queryExporter.resources.limits.memory | string | no | 128Mi | Specifies memory limits. | -| queryExporter.resources.limits.cpu | string | no | 300m | Specifies cpu limits. | +| queryExporter.resources.limits.cpu | string | no | 300m | Specifies CPU limits. | | queryExporter.scrapeTimeout | int | no | 10 | Specifies the timeout in seconds after which the scrape is ended. | | queryExporter.queryTimeout | int | no | 30 | Specifies the timeout in seconds for single query execution. | -| queryExporter.affinity | json | no | n/a | Specifies the affinity scheduling rules. | -| queryExporter.podLabels | yaml | no | n/a | Specifies custom pod labels. | +| queryExporter.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | +| queryExporter.podLabels | YAML | no | n/a | Specifies custom pod labels. | | queryExporter.pgUser | string | no | query-exporter | Specifies name of user to create for postgres exporter. | | queryExporter.pgPassword | string | no | PaSsw0rDfoRExporT3r | Specifies password for postgres exporter user. | | queryExporter.maxMasterConnections | int | no | 10 | Specifies the number of simultaneous connections for master database. | @@ -619,12 +619,12 @@ This sections describes all possible deploy parameters for PoWA UI component. | powaUI.ingress.enabled | bool | no | true | Specifies Ingress should be enabled. 
| | powaUI.securityContext | [Kubernetes Sec Context](https://pkg.go.dev/k8s.io/api/core/v1#SecurityContext) | no | n/a | Specifies pod level security attributes and common container settings. | | powaUI.resources.requests.memory | string | no | 256Mi | Specifies memory requests. | -| powaUI.resources.requests.cpu | string | no | 200m | Specifies cpu requests. | +| powaUI.resources.requests.cpu | string | no | 200m | Specifies CPU requests. | | powaUI.resources.limits.memory | string | no | 512Mi | Specifies memory limits. | -| powaUI.resources.limits.cpu | string | no | 500m | Specifies cpu limits. | +| powaUI.resources.limits.cpu | string | no | 500m | Specifies CPU limits. | | powaUI.cookieSecret | sting | no | n/a | Specifies the secret for Powa UI cookies. | -| powaUI.affinity | json | no | n/a | Specifies the affinity scheduling rules. | -| powaUI.podLabels | yaml | no | n/a | Specifies custom pod labels. | +| powaUI.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | +| powaUI.podLabels | YAML | no | n/a | Specifies custom pod labels. | ## connectionPooler @@ -635,14 +635,14 @@ This sections describes all possible deploy parameters for Connection Pooler (PG | connectionPooler.install | bool | no | false | Indicates that PG Bouncer should be installed or not. | | connectionPooler.securityContext | [Kubernetes Sec Context](https://pkg.go.dev/k8s.io/api/core/v1#SecurityContext) | no | n/a | Specifies pod level security attributes and common container settings. | | connectionPooler.resources.requests.memory | string | no | 256Mi | Specifies memory requests. | -| connectionPooler.resources.requests.cpu | string | no | 200m | Specifies cpu requests. | +| connectionPooler.resources.requests.cpu | string | no | 200m | Specifies CPU requests. | | connectionPooler.resources.limits.memory | string | no | 512Mi | Specifies memory limits. | -| connectionPooler.resources.limits.cpu | string | no | 500m | Specifies cpu limits. 
| +| connectionPooler.resources.limits.cpu | string | no | 500m | Specifies CPU limits. | | connectionPooler.replicas | int | no | 1 | Specifies the number of replicas. | | connectionPooler.username | string | no | pgbouncer | Specifies the username for connection to Postgres. | | connectionPooler.password | string | no | pgbouncer | Specifies the password for connection to Postgres. | | connectionPooler.config | map[string]map[string]string | no | [Default PG Bouncer parameters](#default-pg-bouncer-parameters) | Specifies the config parameters for PGBouncer. [Config parameters](https://www.pgbouncer.org/config.html) | -| connectionPooler.affinity | json | no | n/a | Specifies the affinity scheduling rules. | +| connectionPooler.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | ## replicationController @@ -653,11 +653,11 @@ This sections describes all possible deploy parameters for Replication Controlle | replicationController.install | bool | no | false | Indicates that Replication Controller should be installed or not. | | replicationController.securityContext | [Kubernetes Sec Context](https://pkg.go.dev/k8s.io/api/core/v1#SecurityContext) | no | n/a | Specifies pod level security attributes and common container settings. | | replicationController.resources.requests.memory | string | no | 64Mi | Specifies memory requests. | -| replicationController.resources.requests.cpu | string | no | 200m | Specifies cpu requests. | +| replicationController.resources.requests.cpu | string | no | 200m | Specifies CPU requests. | | replicationController.resources.limits.memory | string | no | 64Mi | Specifies memory limits. | -| replicationController.resources.limits.cpu | string | no | 200m | Specifies cpu limits. | -| replicationController.affinity | json | no | n/a | Specifies the affinity scheduling rules. | -| replicationController.podLabels | yaml | no | n/a | Specifies custom pod labels. 
| +| replicationController.resources.limits.cpu | string | no | 200m | Specifies CPU limits. | +| replicationController.affinity | JSON | no | n/a | Specifies the affinity scheduling rules. | +| replicationController.podLabels | YAML | no | n/a | Specifies custom pod labels. | | replicationController.apiUser | string | no | n/a | Specifies the user for API usage. | | replicationController.apiPassword | string | no | n/a | Specifies the password for API usage. | @@ -680,15 +680,15 @@ Postgres Operator allows configuration of TLS for supplementary and other compon | Parameter | Type | Mandatory | Default value | Description | |----------------------------------------------------------------|----------|-----------|------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | tls.enabled | bool | no | false | Indicates that TLS should be enabled or not. | -| tls.certificateSecretName | string | no | pg-cert | Specifies the name of secret with certificate in PostgreSQL namespace. See [TLS Configuration](/docs/features/tls-configuration.md) | -| tls.generateCerts.enabled | bool | yes | false | Specifies whether to generate SSL certificates by cert-manager or not. If `false` specified, follow [manual certificate configuration guid](/docs/features/tls-configuration.md#manual). | +| tls.certificateSecretName | string | no | pg-cert | Specifies the name of secret with certificate in PostgreSQL namespace. See [TLS Configuration](/docs/public/features/tls-configuration.md) | +| tls.generateCerts.enabled | bool | yes | false | Specifies whether to generate SSL certificates by cert-manager or not. 
If `false` is specified, follow [manual certificate configuration guide](/docs/public/features/tls-configuration.md#manual). |
| tls.generateCerts.duration | int | no | 365 | Specifies SSL certificate validity duration in days. The default value is 365. |
| tls.generateCerts.subjectAlternativeName.additionalDnsNames | []string | no | n/a | Specifies the list of additional DNS names to be added to the "Subject Alternative Name" field of SSL certificate. If access to Postgres Service for external clients is enabled, DNS names from externalHostNames parameter must be specified in here. |
| tls.generateCerts.subjectAlternativeName.additionalIpAddresses | []string | no | n/a | Specifies the list of additional IP addresses to be added to the "Subject Alternative Name" field of SSL certificate. If access to Postgres Service for external clients is enabled, IP addresses from externalHostNames parameter must be specified in here. |
| tls.generateCerts.clusterIssuerName | string | yes | n/a | Specifies name of `ClusterIssuer` resource. If the parameter is not set or empty, `Issuer` resource in current Kubernetes namespace will be used. |
-| tls.certificates.tls_crt | string | no | "" | Specifies the certificate in BASE64 format. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own certificate. |
-| tls.certificates.tls_key | string | no | "" | Specifies the private key in BASE64 format. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own key. |
-| tls.certificates.ca_crt | string | no | "" | Specifies base 64 encoded CA certificate. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own ca certificate. |
+| tls.certificates.tls_crt | string | no | "" | Specifies the certificate in base64 format. It is required if tls.enabled is true and tls.generateCerts.enabled is false. 
This allows user to specify their own certificate. | +| tls.certificates.tls_key | string | no | "" | Specifies the private key in base64 format. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own key. | +| tls.certificates.ca_crt | string | no | "" | Specifies base64 encoded CA certificate. It is required if tls.enabled is true and tls.generateCerts.enabled is false. This allows user to specify their own ca certificate. | ## pgBackRest diff --git a/operator/api/apps/v1/postgresservice_types.go b/operator/api/apps/v1/postgresservice_types.go index e6695222..b94f8987 100644 --- a/operator/api/apps/v1/postgresservice_types.go +++ b/operator/api/apps/v1/postgresservice_types.go @@ -209,6 +209,22 @@ type IntegrationTests struct { PgNodeQty int `json:"pgNodeQty,omitempty"` PodLabels map[string]string `json:"podLabels,omitempty"` Affinity v1.Affinity `json:"affinity,omitempty"` + AtpStorage *AtpStorage `json:"atpStorage,omitempty"` + AtpReport *AtpReport `json:"atpReport,omitempty"` + AtpReportViewUiUrl string `json:"atpReportViewUiUrl,omitempty"` + EnvironmentName string `json:"environmentName,omitempty"` +} + +type AtpStorage struct { + Provider string `json:"provider,omitempty"` + ServerUrl string `json:"serverUrl,omitempty"` + ServerUiUrl string `json:"serverUiUrl,omitempty"` + Bucket string `json:"bucket,omitempty"` + Region string `json:"region,omitempty"` +} + +type AtpReport struct { + Enabled bool `json:"enabled,omitempty"` } // ExternalDataBase defines the desired state of ExternalDataBase diff --git a/operator/api/apps/v1/zz_generated.deepcopy.go b/operator/api/apps/v1/zz_generated.deepcopy.go index 68b58c34..4d48aaad 100644 --- a/operator/api/apps/v1/zz_generated.deepcopy.go +++ b/operator/api/apps/v1/zz_generated.deepcopy.go @@ -10,6 +10,36 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AtpReport) DeepCopyInto(out *AtpReport) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtpReport. +func (in *AtpReport) DeepCopy() *AtpReport { + if in == nil { + return nil + } + out := new(AtpReport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtpStorage) DeepCopyInto(out *AtpStorage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtpStorage. +func (in *AtpStorage) DeepCopy() *AtpStorage { + if in == nil { + return nil + } + out := new(AtpStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupDaemon) DeepCopyInto(out *BackupDaemon) { *out = *in @@ -146,6 +176,16 @@ func (in *IntegrationTests) DeepCopyInto(out *IntegrationTests) { } } in.Affinity.DeepCopyInto(&out.Affinity) + if in.AtpStorage != nil { + in, out := &in.AtpStorage, &out.AtpStorage + *out = new(AtpStorage) + **out = **in + } + if in.AtpReport != nil { + in, out := &in.AtpReport, &out.AtpReport + *out = new(AtpReport) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationTests. diff --git a/operator/api/common/v1/zz_generated.deepcopy.go b/operator/api/common/v1/zz_generated.deepcopy.go index a915d992..d6990f21 100644 --- a/operator/api/common/v1/zz_generated.deepcopy.go +++ b/operator/api/common/v1/zz_generated.deepcopy.go @@ -4,6 +4,8 @@ package v1 +import () + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Storage) DeepCopyInto(out *Storage) { *out = *in diff --git a/operator/api/patroni/v1/patronicore_types.go b/operator/api/patroni/v1/patronicore_types.go index 1c672f6a..1c04d276 100644 --- a/operator/api/patroni/v1/patronicore_types.go +++ b/operator/api/patroni/v1/patronicore_types.go @@ -188,6 +188,22 @@ type IntegrationTests struct { PgNodeQty int `json:"pgNodeQty,omitempty"` PodLabels map[string]string `json:"podLabels,omitempty"` Affinity v1.Affinity `json:"affinity,omitempty"` + AtpStorage *AtpStorage `json:"atpStorage,omitempty"` + AtpReport *AtpReport `json:"atpReport,omitempty"` + AtpReportViewUiUrl string `json:"atpReportViewUiUrl,omitempty"` + EnvironmentName string `json:"environmentName,omitempty"` +} + +type AtpStorage struct { + Provider string `json:"provider,omitempty"` + ServerUrl string `json:"serverUrl,omitempty"` + ServerUiUrl string `json:"serverUiUrl,omitempty"` + Bucket string `json:"bucket,omitempty"` + Region string `json:"region,omitempty"` +} + +type AtpReport struct { + Enabled bool `json:"enabled,omitempty"` } type Policies struct { diff --git a/operator/api/patroni/v1/zz_generated.deepcopy.go b/operator/api/patroni/v1/zz_generated.deepcopy.go index a9ed7bd0..d17f01b4 100644 --- a/operator/api/patroni/v1/zz_generated.deepcopy.go +++ b/operator/api/patroni/v1/zz_generated.deepcopy.go @@ -10,6 +10,36 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtpReport) DeepCopyInto(out *AtpReport) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtpReport. +func (in *AtpReport) DeepCopy() *AtpReport { + if in == nil { + return nil + } + out := new(AtpReport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AtpStorage) DeepCopyInto(out *AtpStorage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtpStorage. +func (in *AtpStorage) DeepCopy() *AtpStorage { + if in == nil { + return nil + } + out := new(AtpStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConsulRegistration) DeepCopyInto(out *ConsulRegistration) { *out = *in @@ -110,6 +140,16 @@ func (in *IntegrationTests) DeepCopyInto(out *IntegrationTests) { } } in.Affinity.DeepCopyInto(&out.Affinity) + if in.AtpStorage != nil { + in, out := &in.AtpStorage, &out.AtpStorage + *out = new(AtpStorage) + **out = **in + } + if in.AtpReport != nil { + in, out := &in.AtpReport, &out.AtpReport + *out = new(AtpReport) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationTests. diff --git a/operator/charts/patroni-core/crds/netcracker.com_patronicores.yaml b/operator/charts/patroni-core/crds/netcracker.com_patronicores.yaml index 2d37f6ca..5dbe055e 100644 --- a/operator/charts/patroni-core/crds/netcracker.com_patronicores.yaml +++ b/operator/charts/patroni-core/crds/netcracker.com_patronicores.yaml @@ -1017,6 +1017,28 @@ spec: x-kubernetes-list-type: atomic type: object type: object + atpReport: + properties: + enabled: + type: boolean + type: object + atpReportViewUiUrl: + type: string + atpStorage: + properties: + bucket: + type: string + provider: + type: string + region: + type: string + serverUiUrl: + type: string + serverUrl: + type: string + type: object + environmentName: + type: string image: type: string pgNodeQty: @@ -2636,9 +2658,10 @@ spec: operator: description: |- Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). type: string tolerationSeconds: description: |- diff --git a/operator/charts/patroni-core/templates/cr.yaml b/operator/charts/patroni-core/templates/cr.yaml index 4643ab0c..2f059691 100644 --- a/operator/charts/patroni-core/templates/cr.yaml +++ b/operator/charts/patroni-core/templates/cr.yaml @@ -243,6 +243,30 @@ spec: {{- end }} {{ end }} pgNodeQty: {{ ( include "postgres.replicasCount" . ) }} + {{- if or .Values.environmentName .Values.atpReport.enabled .Values.atpReportViewUiUrl }} + {{- if .Values.environmentName }} + environmentName: {{ .Values.environmentName | quote }} + {{- end }} + atpReport: + enabled: {{ .Values.atpReport.enabled | default false }} + {{- if .Values.atpReport.enabled }} + atpStorage: + provider: {{ .Values.atpReport.atpStorage.provider | quote }} + region: {{ .Values.atpReport.atpStorage.region | quote }} + {{- if .Values.atpReport.atpStorage.serverUrl }} + serverUrl: {{ .Values.atpReport.atpStorage.serverUrl | quote }} + {{- end }} + {{- if .Values.atpReport.atpStorage.serverUiUrl }} + serverUiUrl: {{ .Values.atpReport.atpStorage.serverUiUrl | quote }} + {{- end }} + {{- if .Values.atpReport.atpStorage.bucket }} + bucket: {{ .Values.atpReport.atpStorage.bucket | quote }} + {{- end }} + {{- end }} + {{- if .Values.atpReportViewUiUrl }} + atpReportViewUiUrl: {{ .Values.atpReportViewUiUrl | quote }} + {{- end }} + {{- end }} {{ end }} {{ if .Values.runTestsOnly }} runTestsTime: {{ now | unixEpoch | quote }} diff --git a/operator/charts/patroni-core/templates/tests-atp-storage-secret.yaml b/operator/charts/patroni-core/templates/tests-atp-storage-secret.yaml new file mode 100644 index 00000000..cafa1419 --- /dev/null +++ b/operator/charts/patroni-core/templates/tests-atp-storage-secret.yaml @@ -0,0 +1,12 @@ +{{- if and 
.Values.tests.install .Values.atpReport.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "patroni-core.fullname" . }}-tests-atp-storage-secret + labels: + {{- include "kubernetes.labels" . | nindent 4 }} +type: Opaque +stringData: + atp-storage-username: {{ .Values.atpReport.atpStorage.username }} + atp-storage-password: {{ .Values.atpReport.atpStorage.password }} +{{- end }} diff --git a/operator/charts/patroni-core/values.yaml b/operator/charts/patroni-core/values.yaml index 95cc749e..eaefd51e 100644 --- a/operator/charts/patroni-core/values.yaml +++ b/operator/charts/patroni-core/values.yaml @@ -291,6 +291,30 @@ tests: runTestsOnly: false +# ATP report upload (S3 under atpReport.atpStorage). CR and Secret only when atpReport.enabled. +atpReport: + enabled: false + atpStorage: + # S3 provider type: aws (AWS S3), minio, s3 (S3-compatible with custom endpoint) + provider: "aws" + # S3 API server URL (e.g., https://s3.amazonaws.com or https://minio.example.com) + serverUrl: "https://s3.amazonaws.com" + # Optional S3 UI URL for browsing results + serverUiUrl: "" + # S3 bucket name. 
If empty - S3 integration is disabled and results stay local + bucket: "" + # S3 region (required for AWS S3) + region: "us-east-1" + # S3 credentials (tests-atp-storage-secret when atpReport.enabled) + username: "" + password: "" + +# URL for viewing Allure reports (e.g., https://reports.example.com) +atpReportViewUiUrl: "" + +# Environment name for organizing test results path in S3 +environmentName: "pgskipper-operator" + INTERNAL_TLS_ENABLED: false GLOBAL_SECURITY_CONTEXT: true diff --git a/operator/charts/patroni-services/crds/netcracker.com_patroniservices.yaml b/operator/charts/patroni-services/crds/netcracker.com_patroniservices.yaml index 436e2cc3..96d4a07a 100644 --- a/operator/charts/patroni-services/crds/netcracker.com_patroniservices.yaml +++ b/operator/charts/patroni-services/crds/netcracker.com_patroniservices.yaml @@ -3553,6 +3553,28 @@ spec: x-kubernetes-list-type: atomic type: object type: object + atpReport: + properties: + enabled: + type: boolean + type: object + atpReportViewUiUrl: + type: string + atpStorage: + properties: + bucket: + type: string + provider: + type: string + region: + type: string + serverUiUrl: + type: string + serverUrl: + type: string + type: object + environmentName: + type: string image: type: string pgNodeQty: @@ -6139,9 +6161,10 @@ spec: operator: description: |- Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). 
type: string tolerationSeconds: description: |- diff --git a/operator/charts/patroni-services/templates/cr.yaml b/operator/charts/patroni-services/templates/cr.yaml index d397a2b5..599f94b3 100644 --- a/operator/charts/patroni-services/templates/cr.yaml +++ b/operator/charts/patroni-services/templates/cr.yaml @@ -423,6 +423,32 @@ spec: {{- end }} {{ end }} pgNodeQty: {{ default "1" .Values.patroni.replicas }} + {{- if or .Values.environmentName .Values.atpReport.enabled .Values.atpReportViewUiUrl }} + {{- if .Values.environmentName }} + environmentName: {{ .Values.environmentName | quote }} + {{- end }} + atpReport: + enabled: {{ .Values.atpReport.enabled | default false }} + {{- if .Values.atpReport.enabled }} + atpStorage: + provider: {{ .Values.atpReport.atpStorage.provider | quote }} + {{- if .Values.atpReport.atpStorage.serverUrl }} + serverUrl: {{ .Values.atpReport.atpStorage.serverUrl | quote }} + {{- end }} + {{- if .Values.atpReport.atpStorage.serverUiUrl }} + serverUiUrl: {{ .Values.atpReport.atpStorage.serverUiUrl | quote }} + {{- end }} + {{- if .Values.atpReport.atpStorage.bucket }} + bucket: {{ .Values.atpReport.atpStorage.bucket | quote }} + {{- end }} + {{- if .Values.atpReport.atpStorage.region }} + region: {{ .Values.atpReport.atpStorage.region | quote }} + {{- end }} + {{- end }} + {{- if .Values.atpReportViewUiUrl }} + atpReportViewUiUrl: {{ .Values.atpReportViewUiUrl | quote }} + {{- end }} + {{- end }} {{ end }} {{ if .Values.runTestsOnly }} runTestsTime: {{ now | unixEpoch | quote }} diff --git a/operator/charts/patroni-services/templates/tests-atp-storage-secret.yaml b/operator/charts/patroni-services/templates/tests-atp-storage-secret.yaml new file mode 100644 index 00000000..e5c5f4c4 --- /dev/null +++ b/operator/charts/patroni-services/templates/tests-atp-storage-secret.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.tests.install .Values.atpReport.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "helm-chart.fullname" . 
}}-tests-atp-storage-secret + labels: + {{- include "kubernetes.labels" . | nindent 4 }} +type: Opaque +stringData: + atp-storage-username: {{ .Values.atpReport.atpStorage.username }} + atp-storage-password: {{ .Values.atpReport.atpStorage.password }} +{{- end }} diff --git a/operator/charts/patroni-services/values.yaml b/operator/charts/patroni-services/values.yaml index b6eca4e7..584982e4 100644 --- a/operator/charts/patroni-services/values.yaml +++ b/operator/charts/patroni-services/values.yaml @@ -177,6 +177,7 @@ backupDaemon: backupSchedule: "0 0/7 * * *" # pgHost: pg-patroni.postgres-service + # clusterDomain: cluster.local # K8s cluster DNS domain; used to build FQDN for pgHost (e.g. pg-patroni.NAMESPACE.svc.cluster.local) # The eviction policy for backup daemon: period and action. The default value is 7d/delete. evictionPolicy: "7d/delete" backupTimeout: 300 @@ -458,6 +459,30 @@ tests: runTestsOnly: false +# ATP report upload (S3 under atpReport.atpStorage). CR and Secret only when atpReport.enabled. +atpReport: + enabled: false + atpStorage: + # S3 provider type: aws (AWS S3), minio, s3 (S3-compatible with custom endpoint) + provider: "aws" + # S3 API server URL (e.g., https://s3.amazonaws.com or https://minio.example.com) + serverUrl: "https://s3.amazonaws.com" + # Optional S3 UI URL for browsing results + serverUiUrl: "" + # S3 bucket name. 
If empty - S3 integration is disabled and results stay local + bucket: "" + # S3 region (required for AWS S3) + region: "us-east-1" + # S3 credentials (tests-atp-storage-secret when atpReport.enabled) + username: "" + password: "" + +# URL for viewing Allure reports (e.g., https://reports.example.com) +atpReportViewUiUrl: "" + +# Environment name for organizing test results path in S3 +environmentName: "pgskipper-operator" + INTERNAL_TLS_ENABLED: false GLOBAL_SECURITY_CONTEXT: true @@ -474,3 +499,4 @@ CLOUD_PUBLIC_HOST: "k8s.default" # DBAAS_CLUSTER_DBA_CREDENTIALS_PASSWORD: "user-2" # MONITORING_ENABLED: false # INFRA_POSTGRES_FS_GROUP: 26 + diff --git a/operator/pkg/deployment/tests.go b/operator/pkg/deployment/tests.go index ecd2e663..50f0c6b5 100644 --- a/operator/pkg/deployment/tests.go +++ b/operator/pkg/deployment/tests.go @@ -32,6 +32,130 @@ var ( TestsLabels = map[string]string{"app": "patroni-tests"} ) +func appendAtpEnvVarsServices(env []corev1.EnvVar, tests *v1.IntegrationTests, secretName string) []corev1.EnvVar { + if tests == nil { + return env + } + + if tests.EnvironmentName != "" { + env = append(env, corev1.EnvVar{Name: "ENVIRONMENT_NAME", Value: tests.EnvironmentName}) + } + + if tests.AtpStorage != nil && tests.AtpStorage.Provider != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_PROVIDER", Value: tests.AtpStorage.Provider}) + + if tests.AtpStorage.Region != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_REGION", Value: tests.AtpStorage.Region}) + } + if tests.AtpStorage.ServerUrl != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_SERVER_URL", Value: tests.AtpStorage.ServerUrl}) + } + if tests.AtpStorage.ServerUiUrl != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_SERVER_UI_URL", Value: tests.AtpStorage.ServerUiUrl}) + } + if tests.AtpStorage.Bucket != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_BUCKET", Value: tests.AtpStorage.Bucket}) + } + } + + atpReportEnabled := false + if 
tests.AtpReport != nil { + atpReportEnabled = tests.AtpReport.Enabled + } + env = append(env, corev1.EnvVar{Name: "ATP_REPORT_ENABLED", Value: strconv.FormatBool(atpReportEnabled)}) + + if atpReportEnabled { + env = append(env, + corev1.EnvVar{ + Name: "ATP_STORAGE_USERNAME", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: "atp-storage-username", + }, + }, + }, + corev1.EnvVar{ + Name: "ATP_STORAGE_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: "atp-storage-password", + }, + }, + }, + ) + } + + if tests.AtpReportViewUiUrl != "" { + env = append(env, corev1.EnvVar{Name: "ATP_REPORT_VIEW_UI_URL", Value: tests.AtpReportViewUiUrl}) + } + + return env +} + +func appendAtpEnvVarsCore(env []corev1.EnvVar, tests *patroniv1.IntegrationTests, secretName string) []corev1.EnvVar { + if tests == nil { + return env + } + + if tests.EnvironmentName != "" { + env = append(env, corev1.EnvVar{Name: "ENVIRONMENT_NAME", Value: tests.EnvironmentName}) + } + + if tests.AtpStorage != nil && tests.AtpStorage.Provider != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_PROVIDER", Value: tests.AtpStorage.Provider}) + + if tests.AtpStorage.Region != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_REGION", Value: tests.AtpStorage.Region}) + } + if tests.AtpStorage.ServerUrl != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_SERVER_URL", Value: tests.AtpStorage.ServerUrl}) + } + if tests.AtpStorage.ServerUiUrl != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_SERVER_UI_URL", Value: tests.AtpStorage.ServerUiUrl}) + } + if tests.AtpStorage.Bucket != "" { + env = append(env, corev1.EnvVar{Name: "ATP_STORAGE_BUCKET", Value: tests.AtpStorage.Bucket}) + } + } + + atpReportEnabled := false + if tests.AtpReport != nil { + atpReportEnabled 
= tests.AtpReport.Enabled + } + env = append(env, corev1.EnvVar{Name: "ATP_REPORT_ENABLED", Value: strconv.FormatBool(atpReportEnabled)}) + + if atpReportEnabled { + env = append(env, + corev1.EnvVar{ + Name: "ATP_STORAGE_USERNAME", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: "atp-storage-username", + }, + }, + }, + corev1.EnvVar{ + Name: "ATP_STORAGE_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: "atp-storage-password", + }, + }, + }, + ) + } + + if tests.AtpReportViewUiUrl != "" { + env = append(env, corev1.EnvVar{Name: "ATP_REPORT_VIEW_UI_URL", Value: tests.AtpReportViewUiUrl}) + } + + return env +} + func NewIntegrationTestsPod(cr *v1.PatroniServices, cluster *patroniv1.PatroniClusterSettings) *corev1.Pod { testsSpec := cr.Spec.IntegrationTests tastsTags := "" @@ -75,7 +199,12 @@ func NewIntegrationTestsPod(cr *v1.PatroniServices, cluster *patroniv1.PatroniCl Image: dockerImage, ImagePullPolicy: cr.Spec.ImagePullPolicy, SecurityContext: util.GetDefaultSecurityContext(), - Args: []string{"robot", "-i", tastsTags, "/test_runs/"}, + // Args are not set: the integration-tests image is built with ENTRYPOINT/CMD that run + // the wrapper flow (see tests/Dockerfile: pgskipper-robot-entrypoint.sh, run-robot). + // Kubernetes Container.Args replaces the image CMD and would bypass that flow. + // Test selection is passed via TESTS_TAGS in env below. 
+ // Previous explicit invocation was: + // Args: []string{"robot", "-i", tastsTags, "/test_runs/"}, Env: []corev1.EnvVar{ { Name: "POSTGRES_USER", @@ -144,6 +273,12 @@ func NewIntegrationTestsPod(cr *v1.PatroniServices, cluster *patroniv1.PatroniCl } } + pod.Spec.Containers[0].Env = appendAtpEnvVarsServices( + pod.Spec.Containers[0].Env, + testsSpec, + fmt.Sprintf("%s-tests-atp-storage-secret", cr.Name), + ) + return pod } @@ -192,7 +327,12 @@ func NewCoreIntegrationTests(cr *patroniv1.PatroniCore, cluster *patroniv1.Patro Image: dockerImage, ImagePullPolicy: cr.Spec.ImagePullPolicy, SecurityContext: util.GetDefaultSecurityContext(), - Args: []string{"robot", "-i", tastsTags, "/test_runs/"}, + // Args are not set: the integration-tests image is built with ENTRYPOINT/CMD that run + // the wrapper flow (see tests/Dockerfile: pgskipper-robot-entrypoint.sh, run-robot). + // Kubernetes Container.Args replaces the image CMD and would bypass that flow. + // Test selection is passed via TESTS_TAGS in env below. 
+ // Previous explicit invocation was: + // Args: []string{"robot", "-i", tastsTags, "/test_runs/"}, Env: []corev1.EnvVar{ { Name: "POSTGRES_USER", @@ -265,5 +405,11 @@ func NewCoreIntegrationTests(cr *patroniv1.PatroniCore, cluster *patroniv1.Patro } } + pod.Spec.Containers[0].Env = appendAtpEnvVarsCore( + pod.Spec.Containers[0].Env, + testsSpec, + fmt.Sprintf("%s-tests-atp-storage-secret", cr.Name), + ) + return pod } diff --git a/services/backup-daemon/Dockerfile b/services/backup-daemon/Dockerfile index b77c1929..19d17f49 100644 --- a/services/backup-daemon/Dockerfile +++ b/services/backup-daemon/Dockerfile @@ -1,3 +1,4 @@ +#checkov:skip=CKV_DOCKER_3:User is not required for backup daemon container FROM --platform=$BUILDPLATFORM golang:1.25-alpine3.23 AS builder ENV GO111MODULE=on diff --git a/services/upgrade/docker/start.sh b/services/upgrade/docker/start.sh index 5929e305..9e910587 100755 --- a/services/upgrade/docker/start.sh +++ b/services/upgrade/docker/start.sh @@ -21,11 +21,11 @@ SLEEP_BETWEEN_ITERATIONS=5 function handle_master_upgrade() { cd /var/lib/pgsql/data/ - echo "[$(date +%Y-%m-%dT%H:%M:%S)] cur path: `pwd`" + echo "[$(date +%Y-%m-%dT%H:%M:%S)] cur path: $(pwd)" - DB_SIZE_GB_FLOAT=$(du -sk /var/lib/pgsql/data/${DATA_DIR} | awk '{ print $1 / 1024 / 1024 }') - DB_SIZE_GB=`printf "%.0f\n" ${DB_SIZE_GB_FLOAT}` + DB_SIZE_GB_FLOAT=$(du -sk "/var/lib/pgsql/data/${DATA_DIR}" | awk '{ print $1 / 1024 / 1024 }') + DB_SIZE_GB=$(printf "%.0f\n" "${DB_SIZE_GB_FLOAT}") PV_SIZE_GB=$(echo "${PV_SIZE}" | tr -dc '0-9') echo @@ -53,7 +53,7 @@ function handle_master_upgrade() { fi else echo "[$(date +%Y-%m-%dT%H:%M:%S)] Migration PV is NOT used, check if there is enough space for migration in master PV" - DOUBLE_DB_SIZE="$((${DB_SIZE_GB} * 2))" + DOUBLE_DB_SIZE="$((DB_SIZE_GB * 2))" if [[ ${DOUBLE_DB_SIZE} -gt ${PV_SIZE_GB} ]]; then echo "[$(date +%Y-%m-%dT%H:%M:%S)] DB size is more than PV size, exiting ..." 
exit 1 @@ -64,12 +64,14 @@ function handle_master_upgrade() { [ -d "$MIGRATION_PATH/tmp/pg" ] && echo "[$(date +%Y-%m-%dT%H:%M:%S)] Prev upgrade dir exists, removing .." && rm -rf "$MIGRATION_PATH/tmp/pg" echo "[$(date +%Y-%m-%dT%H:%M:%S)] initializing target db with parameters $INITDB_PARAMS" + # INITDB_PARAMS may contain multiple initdb flags; word splitting is intentional. + # shellcheck disable=SC2086 /usr/lib/postgresql/"${PG_VERSION_TARGET}"/bin/initdb ${INITDB_PARAMS} --pgdata="$MIGRATION_PATH/tmp/pg" echo "[$(date +%Y-%m-%dT%H:%M:%S)] initialize complete, copying configs" mkdir "/tmp/configs/" - cp /var/lib/pgsql/data/${DATA_DIR}/*.conf "/tmp/configs/" + cp /var/lib/pgsql/data/"${DATA_DIR}"/*.conf "/tmp/configs/" echo "turning off wal archiving" sed -e '/archive_command/ s/^#*/#/' -i "$MIGRATION_PATH/tmp/pg/postgresql.conf" @@ -87,18 +89,18 @@ function handle_master_upgrade() { echo "[$(date +%Y-%m-%dT%H:%M:%S)] making chmod 750 to datadir" - chmod 750 $MIGRATION_PATH/${DATA_DIR} + chmod 750 "${MIGRATION_PATH}/${DATA_DIR}" - SHARED_PRELOAD_LIBRARIES=$(grep "shared_preload_libraries" /var/lib/pgsql/data/${DATA_DIR}/postgresql.conf) + SHARED_PRELOAD_LIBRARIES=$(grep "shared_preload_libraries" "/var/lib/pgsql/data/${DATA_DIR}/postgresql.conf") if [[ -z ${SHARED_PRELOAD_LIBRARIES} ]]; then echo "shared_preload_libraries is not found in PostgreSQL config, please check PostgreSQL params, exiting..." exit 1 fi - echo ${SHARED_PRELOAD_LIBRARIES} >> $MIGRATION_PATH/tmp/pg/postgresql.conf + echo "${SHARED_PRELOAD_LIBRARIES}" >> "${MIGRATION_PATH}/tmp/pg/postgresql.conf" - ls -la $MIGRATION_PATH + ls -la "${MIGRATION_PATH}" echo "[$(date +%Y-%m-%dT%H:%M:%S)] Check cluster before upgrade" /usr/lib/postgresql/"${PG_VERSION_TARGET}"/bin/pg_upgrade \ @@ -205,8 +207,10 @@ function check_user(){ if ! 
whoami &> /dev/null; then if [[ -w /etc/passwd ]]; then - export USER_ID=$(id -u) - export GROUP_ID=$(id -g) + USER_ID=$(id -u) + export USER_ID + GROUP_ID=$(id -g) + export GROUP_ID echo "postgres:x:${USER_ID}:${GROUP_ID}:PostgreSQL Server:${PGDATA}:/bin/bash" >> /etc/passwd echo "UID added ..." fi @@ -220,18 +224,18 @@ function check_pgsql_version(){ # get version of data files PG_VERSION=$(head -n 1 "/var/lib/pgsql/data/${DATA_DIR}/PG_VERSION") - if python -c "import sys; sys.exit(0 if 11.0 <= float("${PG_VERSION}") < 12.0 else 1)"; then + if python -c "import sys; sys.exit(0 if 11.0 <= float(sys.argv[1]) < 12.0 else 1)" "${PG_VERSION}"; then PG_VERSION_TARGET="12" - elif python -c "import sys; sys.exit(0 if 10.0 <= float("${PG_VERSION}") < 11.0 else 1)"; then + elif python -c "import sys; sys.exit(0 if 10.0 <= float(sys.argv[1]) < 11.0 else 1)" "${PG_VERSION}"; then PG_VERSION_TARGET="11" else PG_VERSION_TARGET="10" fi - for i in {1..10}; do + for ((retry = 0; retry < 10; retry++)); do echo "[$(date +%Y-%m-%dT%H:%M:%S)] Will try to find target_version file" [ -f /var/lib/pgsql/data/target_version ] && echo "Target file found, will use version from this file" && \ - PG_VERSION_TARGET=`cat /var/lib/pgsql/data/target_version` && break + PG_VERSION_TARGET=$(cat /var/lib/pgsql/data/target_version) && break sleep 1 done diff --git a/tests/Dockerfile b/tests/Dockerfile index 43606367..bfc90e5f 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -1,11 +1,16 @@ -FROM ghcr.io/netcracker/qubership-docker-integration-tests:0.3.9 +FROM ghcr.io/netcracker/qubership-docker-integration-tests:main ENV LC_ALL=en_US.UTF-8 \ LANG=en_US.UTF-8 +# Base image may run as non-root; install as root so /root and site-packages are writable +USER root + COPY docker/pip.conf /root/.pip/pip.conf COPY docker/requirements.txt /root/requirements.txt +# Base image defines Alpine release; pinning apk packages here would duplicate index and break on parent bumps. 
+# hadolint ignore=DL3018 RUN set -x \ && apk add --update --no-cache build-base postgresql-dev \ && pip3 install --no-cache-dir -r /root/requirements.txt \ @@ -15,15 +20,22 @@ RUN chmod -R g=u /etc/passwd && \ mkdir /app && \ mkdir /test_runs +# Project structure (unchanged): app and robot under /app, /test_runs COPY ./app/* /app/ COPY robot /test_runs/ +# Also expose tests under base image layout so new scripts (adapter-S3, etc.) can run +COPY robot ${ROBOT_HOME}/tests + COPY docker/uid_entrypoint /opt/uid_entrypoint +# Wrapper: set TAGS from TESTS_TAGS then run base entrypoint (adapter-S3, run-robot) +COPY docker/pgskipper-robot-entrypoint.sh /opt/pgskipper-robot-entrypoint.sh RUN chgrp -R 0 /app && chmod g+w /app && \ chgrp -R 0 /test_runs && chmod -R g+w /test_runs && \ - chmod +x /opt/uid_entrypoint + chmod +x /opt/uid_entrypoint /opt/pgskipper-robot-entrypoint.sh && \ + chown -R 1000:0 "${ROBOT_HOME}" && chmod -R 775 "${ROBOT_HOME}" -# Volumes are defined to support read-only root file system +# Volumes as in original (read-only root FS support, etc.) VOLUME /etc VOLUME /app VOLUME /test_runs @@ -32,5 +44,6 @@ VOLUME /tmp USER 1001 WORKDIR /app -ENTRYPOINT [ "/opt/uid_entrypoint" ] -CMD ["robot -i ${TESTS_TAGS} /test_runs/"] +ENTRYPOINT ["/opt/uid_entrypoint"] +# Use new scripts flow (base entrypoint + adapter-S3); tags from TESTS_TAGS env +CMD ["/opt/pgskipper-robot-entrypoint.sh", "run-robot"] diff --git a/tests/docker/pgskipper-robot-entrypoint.sh b/tests/docker/pgskipper-robot-entrypoint.sh new file mode 100755 index 00000000..0970e7e3 --- /dev/null +++ b/tests/docker/pgskipper-robot-entrypoint.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# Bridge TESTS_TAGS (set by operator) to TAGS (expected by base image entrypoint). 
+export TAGS="${TESTS_TAGS:-$TAGS}"
+# Base scripts use ./tests and ./output relative to cwd; image has WORKDIR=/app but tests live in ROBOT_HOME/tests
+cd "${ROBOT_HOME:-/opt/robot}" || exit 1
+exec /docker-entrypoint.sh "$@"
diff --git a/tests/docker/requirements.txt b/tests/docker/requirements.txt
index f1fe70d8..e7fd341e 100644
--- a/tests/docker/requirements.txt
+++ b/tests/docker/requirements.txt
@@ -1,3 +1,5 @@
+# Keep same set as before; align versions with base image to avoid pip conflicts
+allure-robotframework==2.15.0
 aniso8601==10.0.1
 cachetools==4.2.4
 certifi==2024.7.4
@@ -11,19 +13,23 @@ importlib-metadata==8.7.1
 ipaddress==1.0.23
 itsdangerous==2.1.2
 Jinja2==3.1.6
-kubernetes==31.0.0
+# Align with base image to avoid K8s API/SSL errors (was 31.0.0)
+kubernetes==34.1.0
 MarkupSafe==3.0.3
 oauthlib==3.3.1
+# Switching to psycopg2-binary==2.9.10 would remove the need for build-base/postgresql-dev in the image
 psycopg2==2.9.10
 pyasn1==0.6.3
 pyasn1-modules==0.4.2
+# Same version as base image; kept pinned to avoid pip conflicts
 PyJWT==2.4.0
 python-dateutil==2.9.0.post0
 python-string-utils==1.0.0
 pytz==2026.1.post1
 requests==2.32.5
 requests-oauthlib==2.0.0
-robotframework==5.0
+# Match base image — avoid downgrade conflict (was 5.0)
+robotframework==7.3.2
 robotframework-databaselibrary==1.2.4
 robotframework-requests==0.9.7
 rsa==4.8
@@ -31,7 +37,8 @@ ruamel.yaml==0.19.1
 ruamel.yaml.clib==0.2.12
 six==1.16.0
 typing_extensions==4.2.0
-urllib3==2.6.3
+# Align with base image (was 2.6.3)
+urllib3~=2.3.0
 websocket-client==1.9.0
 Werkzeug==3.1.5
 zipp==3.23.0