From 9df4c9f7d7596ced77d9bd6f763ac08444e9100f Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Thu, 7 May 2026 15:28:06 -0400 Subject: [PATCH 1/2] Removes duplicate latest version --- README.md | 69 ++ docusaurus.config.js | 6 + .../howtos/automation-and-gitops/overview.md | 4 +- .../howtos/scaling-resources.md | 2 +- .../howtos/space-observability.md | 2 +- .../howtos/automation-and-gitops/overview.md | 4 +- .../version-1.13/howtos/scaling-resources.md | 6 +- .../howtos/automation-and-gitops/overview.md | 4 +- .../version-1.14/howtos/scaling-resources.md | 6 +- .../howtos/automation-and-gitops/overview.md | 4 +- .../version-1.15/howtos/scaling-resources.md | 6 +- .../howtos/space-observability.md | 2 +- .../version-1.16/concepts/_category_.json | 7 - .../version-1.16/concepts/control-planes.md | 222 ----- .../version-1.16/concepts/groups.md | 123 --- .../version-1.16/howtos/_category_.json | 7 - .../version-1.16/howtos/api-connector.md | 408 ---------- .../version-1.16/howtos/attach-detach.md | 242 ------ .../version-1.16/howtos/auto-upgrade.md | 128 --- .../automation-and-gitops/_category_.json | 8 - .../howtos/automation-and-gitops/overview.md | 133 --- .../version-1.16/howtos/backup-and-restore.md | 516 ------------ .../version-1.16/howtos/billing.md | 302 ------- .../version-1.16/howtos/capacity-licensing.md | 593 -------------- .../version-1.16/howtos/certs.md | 274 ------- .../version-1.16/howtos/configure-ha.md | 445 ---------- .../howtos/control-plane-topologies.md | 553 ------------- .../version-1.16/howtos/controllers.md | 428 ---------- .../version-1.16/howtos/ctp-audit-logs.md | 544 ------------- .../version-1.16/howtos/ctp-connector.md | 499 ------------ .../version-1.16/howtos/debugging-a-ctp.md | 123 --- .../version-1.16/howtos/declarative-ctps.md | 105 --- .../version-1.16/howtos/deploy-query-api.md | 389 --------- .../version-1.16/howtos/deployment-reqs.md | 243 ------ .../version-1.16/howtos/dr.md | 412 ---------- .../version-1.16/howtos/gitops.md 
| 142 ---- .../version-1.16/howtos/hub-rbac.md | 76 -- .../howtos/ingress-nginx-migration.md | 764 ------------------ .../version-1.16/howtos/ingress.md | 226 ------ .../version-1.16/howtos/managed-service.md | 23 - .../howtos/managed-spaces-deployment.md | 267 ------ .../howtos/mcp-connector-guide.md | 164 ---- .../version-1.16/howtos/migrating-to-mcps.md | 434 ---------- .../version-1.16/howtos/mirror-images.md | 83 -- .../version-1.16/howtos/observability.md | 329 -------- .../version-1.16/howtos/oidc-configuration.md | 284 ------- .../version-1.16/howtos/query-api.md | 315 -------- .../version-1.16/howtos/scaling-resources.md | 179 ---- .../version-1.16/howtos/secrets-management.md | 727 ----------------- .../howtos/self-hosted-spaces-deployment.md | 488 ----------- .../version-1.16/howtos/simulations.md | 104 --- .../howtos/space-observability.md | 454 ----------- .../version-1.16/howtos/spaces-management.md | 213 ----- .../howtos/tracing/_category_.json | 8 - .../version-1.16/howtos/tracing/overview.md | 91 --- .../version-1.16/howtos/tracing/query-api.md | 89 -- .../version-1.16/howtos/tracing/spaces-api.md | 25 - .../howtos/tracing/spaces-router.md | 49 -- .../version-1.16/howtos/troubleshooting.md | 132 --- .../version-1.16/howtos/use-argo.md | 223 ----- .../howtos/workload-id/_category_.json | 11 - .../workload-id/backup-restore-config.md | 384 --------- .../howtos/workload-id/billing-config.md | 454 ----------- .../howtos/workload-id/eso-config.md | 503 ------------ .../version-1.16/overview/_category_.json | 4 - .../version-1.16/overview/index.md | 41 - .../version-1.16/reference/_category_.json | 5 - .../version-1.16/reference/index.md | 72 -- .../self-hosted-spaces-quickstart.md | 227 ------ .../version-1.16-sidebars.json | 130 --- self-hosted-spaces_versions.json | 2 +- src/theme/DocSidebar/Desktop/Content/index.js | 10 +- 72 files changed, 102 insertions(+), 14449 deletions(-) delete mode 100644 
self-hosted-spaces_versioned_docs/version-1.16/concepts/_category_.json delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/concepts/control-planes.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/concepts/groups.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/_category_.json delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/api-connector.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/attach-detach.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/auto-upgrade.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/automation-and-gitops/_category_.json delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/automation-and-gitops/overview.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/backup-and-restore.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/billing.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/capacity-licensing.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/certs.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/configure-ha.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/control-plane-topologies.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/controllers.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/ctp-audit-logs.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/ctp-connector.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/debugging-a-ctp.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/declarative-ctps.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/deploy-query-api.md delete mode 100644 
self-hosted-spaces_versioned_docs/version-1.16/howtos/deployment-reqs.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/dr.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/gitops.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/hub-rbac.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/ingress-nginx-migration.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/ingress.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/managed-service.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/managed-spaces-deployment.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/mcp-connector-guide.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/migrating-to-mcps.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/mirror-images.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/observability.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/oidc-configuration.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/query-api.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/scaling-resources.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/secrets-management.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/self-hosted-spaces-deployment.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/simulations.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/space-observability.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/spaces-management.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/_category_.json delete mode 100644 
self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/overview.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/query-api.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/spaces-api.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/spaces-router.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/troubleshooting.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/use-argo.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/_category_.json delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/backup-restore-config.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/billing-config.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/eso-config.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/overview/_category_.json delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/overview/index.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/reference/_category_.json delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/reference/index.md delete mode 100644 self-hosted-spaces_versioned_docs/version-1.16/self-hosted-spaces-quickstart.md delete mode 100644 self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json diff --git a/README.md b/README.md index cd0780f4f..5ea682a96 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ This repo contains the Upbound documentation built with Docusaurus. * [Local Development](#local-development) +* [Self-Hosted Spaces versioning](#self-hosted-spaces-versioning) * [Style Guide](#style-guide) * [Code style guide](#code-style-guide) * [Markdown](#markdown) @@ -84,6 +85,74 @@ common errors. 
| `make help` | Display all available commands | +## Self-Hosted Spaces versioning + +The Self-Hosted Spaces docs are the only versioned plugin in this repo. UXP, +Cloud Spaces, and the rest are unversioned. + +### Layout + +- `self-hosted-spaces-docs/` — the live latest version. Served at + `/self-hosted-spaces/`. +- `self-hosted-spaces_versioned_docs/version-X.Y/` — frozen snapshots of past + versions. Served at `/self-hosted-spaces/X.Y/`. +- `self-hosted-spaces_versioned_sidebars/version-X.Y-sidebars.json` — sidebar + config for each frozen version. +- `self-hosted-spaces_versions.json` — the list of frozen versions, newest + first. Does **not** include the live latest. + +The label for the live latest is set in two places (keep them in sync): + +- `versions.current.label` in `docusaurus.config.js` +- `LATEST_VERSION` in `src/theme/DocSidebar/Desktop/Content/index.js` + +### Patching docs + +- **Live latest** — edit files in `self-hosted-spaces-docs/`. Changes go live at + `/self-hosted-spaces/...` on the next deploy. +- **An older version** — edit files in + `self-hosted-spaces_versioned_docs/version-X.Y/`. Older versions don't + inherit edits from the live latest. If a fix applies to multiple versions, + apply it to each tree. + +### Cutting a new version + +When shipping a new version, you snapshot the *current* state under the label +it currently represents, then bump the label for the next cycle. For example, +shipping 1.17 when the live latest is labeled 1.16: + +1. Make sure `self-hosted-spaces-docs/` reflects the final state of 1.16. +2. Snapshot current as 1.16: + + ```bash + npm run docusaurus -- docs:version:self-hosted-spaces 1.16 + ``` + + This copies `self-hosted-spaces-docs/` to + `self-hosted-spaces_versioned_docs/version-1.16/`, generates + `self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json` from the + current sidebar, and prepends `"1.16"` to `self-hosted-spaces_versions.json`. + +3. 
Bump the live-latest label in both places: + + - `versions.current.label` in `docusaurus.config.js` → `"1.17"` + - `LATEST_VERSION` in `src/theme/DocSidebar/Desktop/Content/index.js` → + `'1.17'` + +4. Apply 1.17 content changes to `self-hosted-spaces-docs/`. +5. Run `npm run clear && npm start` to verify the dropdown shows + `1.17 (Latest)` and `/self-hosted-spaces/1.16/` resolves to the new + snapshot. +6. Commit and open a PR. + +### Dropping an old version + +To stop publishing (for example) 1.13: + +1. Delete `self-hosted-spaces_versioned_docs/version-1.13/`. +2. Delete `self-hosted-spaces_versioned_sidebars/version-1.13-sidebars.json`. +3. Remove `"1.13"` from `self-hosted-spaces_versions.json`. + ## Style guide **TL;DR** diff --git a/docusaurus.config.js b/docusaurus.config.js index 6c4b3747e..4f46abb29 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -95,6 +95,12 @@ const config = { routeBasePath: "/self-hosted-spaces", sidebarPath: require.resolve("./src/sidebars/self-hosted-spaces.js"), includeCurrentVersion: true, + lastVersion: "current", + versions: { + current: { + label: "1.16", + }, + }, }, ], [ diff --git a/self-hosted-spaces-docs/howtos/automation-and-gitops/overview.md b/self-hosted-spaces-docs/howtos/automation-and-gitops/overview.md index bd8f8e728..a9f86548f 100644 --- a/self-hosted-spaces-docs/howtos/automation-and-gitops/overview.md +++ b/self-hosted-spaces-docs/howtos/automation-and-gitops/overview.md @@ -38,13 +38,13 @@ The way you configure GitOps depends on your deployment model: **Choose your path based on your deployment model:** -###. Cloud Spaces +### Cloud Spaces If you're using Upbound Cloud Spaces (Dedicated or Managed): 1. Start with [GitOps with Upbound Control Planes](/cloud-spaces/howtos/gitops-on-upbound/) 2. Learn how to integrate Argo CD with Cloud Spaces 3. Manage both control plane infrastructure and Upbound resources declaratively -###. 
Self-Hosted Spaces +### Self-Hosted Spaces If you're running self-hosted Spaces: 1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../gitops.md) 2. Learn how to configure control plane connection secrets diff --git a/self-hosted-spaces-docs/howtos/scaling-resources.md b/self-hosted-spaces-docs/howtos/scaling-resources.md index 239030254..aba74f0e5 100644 --- a/self-hosted-spaces-docs/howtos/scaling-resources.md +++ b/self-hosted-spaces-docs/howtos/scaling-resources.md @@ -157,7 +157,7 @@ controlPlanes: cpu: "500m" memory: "512Mi" ha: - enabled: true #. production environments + enabled: true # For production environments ``` Apply the configuration using Helm: diff --git a/self-hosted-spaces-docs/howtos/space-observability.md b/self-hosted-spaces-docs/howtos/space-observability.md index c03b68a5d..f53afe549 100644 --- a/self-hosted-spaces-docs/howtos/space-observability.md +++ b/self-hosted-spaces-docs/howtos/space-observability.md @@ -88,7 +88,7 @@ logs, and traces to your configured observability backends. This feature requires the [OpenTelemetry Operator][opentelemetry-operator] on the Space cluster. -Note: If running Spaces v1.11 or later, use OpenTelemetry Operator v0.110.0 or +Note: If running Spaces v1.16 or later, use OpenTelemetry Operator v0.139.0 or later due to breaking changes in the OpenTelemetry Operator. ## Configuration diff --git a/self-hosted-spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md b/self-hosted-spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md index 4eb96247c..ad6088d1b 100644 --- a/self-hosted-spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md +++ b/self-hosted-spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md @@ -38,13 +38,13 @@ The way you configure GitOps depends on your deployment model: **Choose your path based on your deployment model:** -###. 
Cloud Spaces +### Cloud Spaces If you're using Upbound Cloud Spaces (Dedicated or Managed): 1. Start with [GitOps with Upbound Control Planes](/cloud-spaces/howtos/gitops-on-upbound/) 2. Learn how to integrate Argo CD with Cloud Spaces 3. Manage both control plane infrastructure and Upbound resources declaratively -###. Self-Hosted Spaces +### Self-Hosted Spaces If you're running self-hosted Spaces: 1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../gitops-with-argocd.md) 2. Learn how to configure control plane connection secrets diff --git a/self-hosted-spaces_versioned_docs/version-1.13/howtos/scaling-resources.md b/self-hosted-spaces_versioned_docs/version-1.13/howtos/scaling-resources.md index 0b3a21257..aba74f0e5 100644 --- a/self-hosted-spaces_versioned_docs/version-1.13/howtos/scaling-resources.md +++ b/self-hosted-spaces_versioned_docs/version-1.13/howtos/scaling-resources.md @@ -78,8 +78,8 @@ controlPlanes: For AWS: - Use GP3 volumes with adequate IOPS --. AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline) --. optimal performance, provision at least 32Gi to support up to 16,000 IOPS +- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline) +- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS For GCP and Azure: - Use SSD-based persistent disk types for optimal performance @@ -157,7 +157,7 @@ controlPlanes: cpu: "500m" memory: "512Mi" ha: - enabled: true #. 
production environments + enabled: true # For production environments ``` Apply the configuration using Helm: diff --git a/self-hosted-spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md b/self-hosted-spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md index 4eb96247c..ad6088d1b 100644 --- a/self-hosted-spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md +++ b/self-hosted-spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md @@ -38,13 +38,13 @@ The way you configure GitOps depends on your deployment model: **Choose your path based on your deployment model:** -###. Cloud Spaces +### Cloud Spaces If you're using Upbound Cloud Spaces (Dedicated or Managed): 1. Start with [GitOps with Upbound Control Planes](/cloud-spaces/howtos/gitops-on-upbound/) 2. Learn how to integrate Argo CD with Cloud Spaces 3. Manage both control plane infrastructure and Upbound resources declaratively -###. Self-Hosted Spaces +### Self-Hosted Spaces If you're running self-hosted Spaces: 1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../gitops-with-argocd.md) 2. Learn how to configure control plane connection secrets diff --git a/self-hosted-spaces_versioned_docs/version-1.14/howtos/scaling-resources.md b/self-hosted-spaces_versioned_docs/version-1.14/howtos/scaling-resources.md index 0b3a21257..aba74f0e5 100644 --- a/self-hosted-spaces_versioned_docs/version-1.14/howtos/scaling-resources.md +++ b/self-hosted-spaces_versioned_docs/version-1.14/howtos/scaling-resources.md @@ -78,8 +78,8 @@ controlPlanes: For AWS: - Use GP3 volumes with adequate IOPS --. AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline) --. 
optimal performance, provision at least 32Gi to support up to 16,000 IOPS +- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline) +- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS For GCP and Azure: - Use SSD-based persistent disk types for optimal performance @@ -157,7 +157,7 @@ controlPlanes: cpu: "500m" memory: "512Mi" ha: - enabled: true #. production environments + enabled: true # For production environments ``` Apply the configuration using Helm: diff --git a/self-hosted-spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md b/self-hosted-spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md index 4eb96247c..ad6088d1b 100644 --- a/self-hosted-spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md +++ b/self-hosted-spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md @@ -38,13 +38,13 @@ The way you configure GitOps depends on your deployment model: **Choose your path based on your deployment model:** -###. Cloud Spaces +### Cloud Spaces If you're using Upbound Cloud Spaces (Dedicated or Managed): 1. Start with [GitOps with Upbound Control Planes](/cloud-spaces/howtos/gitops-on-upbound/) 2. Learn how to integrate Argo CD with Cloud Spaces 3. Manage both control plane infrastructure and Upbound resources declaratively -###. Self-Hosted Spaces +### Self-Hosted Spaces If you're running self-hosted Spaces: 1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../gitops-with-argocd.md) 2. 
Learn how to configure control plane connection secrets diff --git a/self-hosted-spaces_versioned_docs/version-1.15/howtos/scaling-resources.md b/self-hosted-spaces_versioned_docs/version-1.15/howtos/scaling-resources.md index 0b3a21257..aba74f0e5 100644 --- a/self-hosted-spaces_versioned_docs/version-1.15/howtos/scaling-resources.md +++ b/self-hosted-spaces_versioned_docs/version-1.15/howtos/scaling-resources.md @@ -78,8 +78,8 @@ controlPlanes: For AWS: - Use GP3 volumes with adequate IOPS --. AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline) --. optimal performance, provision at least 32Gi to support up to 16,000 IOPS +- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline) +- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS For GCP and Azure: - Use SSD-based persistent disk types for optimal performance @@ -157,7 +157,7 @@ controlPlanes: cpu: "500m" memory: "512Mi" ha: - enabled: true #. production environments + enabled: true # For production environments ``` Apply the configuration using Helm: diff --git a/self-hosted-spaces_versioned_docs/version-1.15/howtos/space-observability.md b/self-hosted-spaces_versioned_docs/version-1.15/howtos/space-observability.md index 8f4970587..a4ce4ce20 100644 --- a/self-hosted-spaces_versioned_docs/version-1.15/howtos/space-observability.md +++ b/self-hosted-spaces_versioned_docs/version-1.15/howtos/space-observability.md @@ -173,7 +173,7 @@ The sampling behavior depends on whether a parent trace context exists: - **With parent context**: If a `traceparent` header is present, the parent's sampling decision is respected, enabling proper distributed tracing across services. -- **Root spans**:. new traces without a parent, Envoy samples based on +- **Root spans**: For new traces without a parent, Envoy samples based on `x-request-id` hashing. The default sampling rate is 10%. 
#### TLS configuration for external collectors diff --git a/self-hosted-spaces_versioned_docs/version-1.16/concepts/_category_.json b/self-hosted-spaces_versioned_docs/version-1.16/concepts/_category_.json deleted file mode 100644 index 4b8667e29..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/concepts/_category_.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "label": "Concepts", - "position": 2, - "collapsed": true -} - - diff --git a/self-hosted-spaces_versioned_docs/version-1.16/concepts/control-planes.md b/self-hosted-spaces_versioned_docs/version-1.16/concepts/control-planes.md deleted file mode 100644 index f29c79206..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/concepts/control-planes.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: Control Planes -weight: 1 -description: An overview of control planes in Upbound ---- - - -Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means: - -- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance. -- scaling of the infrastructure. -- the maintenance of the core Crossplane components that make up a control plane. - -This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. - - -## Control plane architecture - -![Managed Control Plane Architecture](/img/mcp.png) - -Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion. - -### Crossplane API - -Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. 
You can make API calls in the following ways: - -- Direct calls: HTTP/gRPC -- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console. - -Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server. - -Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively. - -### Crossplane versions and features - -Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence that Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version. - -For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades. - -Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified. - -### Hosting environments - -Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound, which are hosting environments for control planes. - -Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical. 
- -## Management - -### Create a control plane - -You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`. - - - - - -To use the CLI, run the following: - -```shell -up ctp create -``` - -To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation. - - - -You can create and manage control planes declaratively in Upbound. Before you -begin, ensure you're logged into Upbound and set the correct context: - -```bash -up login -# Example: acmeco/upbound-gcp-us-west-1/default -up ctx ${yourOrganization}/${yourSpace}/${yourGroup} -```` - -```yaml -#controlplane-a.yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: controlplane-a -spec: - crossplane: - autoUpgrade: - channel: Rapid -``` - -```bash -kubectl apply -f controlplane-a.yaml -``` - - - - - -### Connect directly to your control plane - -Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. - -You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane: - -```shell -# Example: acmeco/upbound-gcp-us-west-1/default/ctp1 -up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -``` - -To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following: - -```shell -up ctx .. -``` - -You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f]. - -```shell -up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml -``` - -:::tip -To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation]. 
-::: - -## Configuration - -When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, like to create and manage the lifecycle of new types of infrastructure resources. - -You're encourage to install any available Crossplane package type (Providers, Configurations, Functions) available in the [Upbound Marketplace][upbound-marketplace] on your control planes. - -### Install packages - -Below are a couple ways to install Crossplane packages on your control plane. - - - - - - -Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider: - -```shell -up ctp provider install xpkg.upbound.io/upbound/provider-family-aws -``` - -To install a Configuration: - -```shell -up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws -``` - -To install a Function: - -```shell -up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl -``` - - -You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider: - -```yaml -cat < - - - -For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) Engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes]. - - - - - - -### Configure Crossplane ProviderConfigs - -#### ProviderConfigs with OpenID Connect - -Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more. 
- -#### Generic ProviderConfigs - -The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`. - -### Configure secrets - -Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation]. - -### Configure backups - -Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-1]. - -### Configure telemetry - - -Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-2]. 
- - - -[automatic-upgrades]: /self-hosted-spaces/howtos/auto-upgrade -[release-notes]: https://github.com/upbound/universal-crossplane/releases -[control-plane-group]: /self-hosted-spaces/concepts/groups -[space]: /self-hosted-spaces/overview -[up-cli]: /reference/cli-reference -[cli-reference]: /reference/cli-reference -[up-ctx]: /reference/cli-reference -[up-ctx-f]: /reference/cli-reference -[cli-documentation]: /manuals/cli/concepts/contexts -[upbound-marketplace]: https://marketplace.upbound.io -[upbound-marketplace-1]: https://marketplace.upbound.io -[gitops-with-control-planes]: /cloud-spaces/howtos/gitops-on-upbound -[connect-control-planes-to-external-services]: /manuals/platform/oidc -[spaces-documentation]: /self-hosted-spaces/howtos/secrets-management -[spaces-documentation-1]: /self-hosted-spaces/howtos/backup-and-restore -[otel]: https://otel.com -[spaces-documentation-2]: /self-hosted-spaces/howtos/observability diff --git a/self-hosted-spaces_versioned_docs/version-1.16/concepts/groups.md b/self-hosted-spaces_versioned_docs/version-1.16/concepts/groups.md deleted file mode 100644 index 5913663c3..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/concepts/groups.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Control Plane Groups -sidebar_position: 2 -description: An introduction to the Control Plane Groups in Upbound -plan: "enterprise" ---- - - - -In Upbound, Control Plane Groups (or just, 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group-level. - -## When to use multiple groups - -You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. 
For users who only need to deploy and manage resources in a couple cloud accounts, you shouldn't need to think about groups at all. - -Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_. - -## The 'default' group - -Every Cloud Space in Upbound has a group named _default_ available. - -## Working with groups - -### View groups - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - - -```shell -up group list -``` - - - - -```shell -kubectl get namespaces -l spaces.upbound.io/group=true -``` - - - - -### Set the group for a request - -Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag: - -```shell -up ctp list --group=team1 -``` -```shell -up ctp create new-ctp --group=team2 -``` - -### Set the group preference - -The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context is capable of pointing at a variety of altitudes: - -1. A Space in Upbound -2. A group within a Space -3. a control plane within a group - -To set the group preference, use `up ctx` to choose a group as your preferred Upbound context. 
For example: - -```shell -# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco' -up ctx acmeco/upbound-gcp-us-west-1/default/ -``` - -### Create a group - -To create a group, login to Upbound and set your context to your desired Space: - -```shell -up login -up ctx '/' -# Example: up ctx acmeco/upbound-gcp-us-west-1 -``` - - -Create a group: - -```shell -up group create my-new-group -``` - -### Delete a group - -To delete a group, login to Upbound and set your context to your desired Space: - -```shell -up login -up ctx '/' -# Example: up ctx acmeco/upbound-gcp-us-west-1 -``` - -Delete a group: - -```shell -up group delete my-new-group -``` - -### Protected groups - -Once a control plane gets created in a group, Upbound enforces a protection policy on the group. Upbound prevents accidental deletion of the group. To delete a group that has control planes in it, you should first delete all control planes in the group. - -## Groups in the context of single-tenant Spaces - -Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space. - -Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, whenever a group gets created, there too must be a Kubernetes namespace accordingly. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing the following: - -1. Creating a group with the same name as a preexisting Kubernetes namespace -2. Creating a control plane in a preexisting Kubernetes namespace -3. 
Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true` - - -[secrets]: /self-hosted-spaces/howtos/secrets-management -[backups]: /self-hosted-spaces/howtos/workload-id/backup-restore-config/ -[space]: /self-hosted-spaces/overview -[upbound-context]: /manuals/cli/concepts/contexts diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/_category_.json b/self-hosted-spaces_versioned_docs/version-1.16/howtos/_category_.json deleted file mode 100644 index d3a8547aa..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/_category_.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "label": "How-tos", - "position": 3, - "collapsed": true -} - - diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/api-connector.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/api-connector.md deleted file mode 100644 index 20ca757d4..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/api-connector.md +++ /dev/null @@ -1,408 +0,0 @@ ---- -title: API Connector -weight: 90 -description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization -aliases: - - /api-connector - - /concepts/api-connector ---- - -:::warning -API Connector is currently in **Preview**. The feature is under active -development and subject to breaking changes. Use for testing and evaluation -purposes only. -::: - -API Connector enables seamless integration between Kubernetes application -clusters consuming APIs and remote Crossplane control planes providing and -reconciling APIs. - - -The API Connector decouples where Crossplane runs from where teams consume its -APIs. For example, Crossplane may run in an Upbound control plane while an -existing Kubernetes cluster consumes its APIs. - -The Control Plane Connector offers coarse-grained connectivity between app -clusters and a control plane. The API Connector lets you choose -which APIs to expose across multiple clusters. 
- -## Architecture overview - -![API Connector Architecture](/img/api-connector.png) - -API Connector uses a **provider-consumer** model: - -- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure. -- **Consumer cluster**: Any Kubernetes cluster where its users want to use APIs provided by the provider control plane, without having to run Crossplane. API connector gets installed in the consumer cluster, and bidirectionally syncs API objects to the provider. - -### Key components - -**Custom Resource Definitions (CRDs)**: - - -- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use. - -- `ClusterAPIBinding`: Instructs API connector to sync all API objects cluster-wide with a given API group to a given provider cluster. -- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API connector to sync API objects within a given namespace and with a given API group to a given provider cluster. - - -## Prerequisites - -Before using API Connector, ensure: - -1. **Consumer cluster** has network access to the provider control plane -1. You have a license to use API connector. If you are unsure, [contact Upbound][contact] or your sales representative. - -This guide walks through how to automate connecting your cluster to an Upbound -control plane. You can also manually configure the API Connector. - -## Publishing APIs in the provider cluster - - - - -First, log in to your provider control plane, and choose which CRD APIs you want -to make accessible to the consumer cluster's users. API connector only syncs -these "bindable" CRDs. 
- - - - - - -Use the `up` CLI to login: - -```bash -up login -``` - -Connect to your control plane: - -```bash -up ctx -``` - -Check what CRDs are available: - -```bash -kubectl get crds -``` - - -Label all CRDs you want to publish with the bindable label: - - -```bash -kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite -``` - - - - -Change context to the provider cluster: -```bash -kubectl config set-context -``` - -Check what CRDs are available: -```bash -kubectl get crds -``` - - -Label all CRDs you want to publish with the bindable label - -```bash -kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite -``` - - - -## Installation - - - - -The up CLI provides the simplest installation method with automatic -configuration: - -Make sure the current Kubeconfig context is set to the **provider control plane** -```bash -up ctx - -up controlplane api-connector install --consumer-kubeconfig [OPTIONS] -``` - -The command: -1. creates a Robot account (named ``) in the Upbound Cloud organization ``, -1. Gives the created robot account `admin` permissions to the provider control plane `` -1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster. -1. Installs the API connector Helm chart in the consumer cluster. -1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API connector can authenticate successfully to the provider control plane. -1. API connector pulls all published CRDs from the previous step into the consumer cluster. - -**Example**: -```bash -up controlplane api-connector install \ - --consumer-kubeconfig ~/.kube/config \ - --consumer-context my-cluster \ - --upbound-token -``` - -This command uses provided token to authenticate with the **Provider control plane** -and create a `ClusterConnection` resource in the **Consumer cluster** to connect to the -**Provider control plane**. 
- -**Key Options**: -- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required) -- `--consumer-context`: Context name for consumer cluster (required) -- `--name`: Custom name for connection resources (optional) -- `--upbound-token`: API token for authentication (optional) -- `--upgrade`: Upgrade existing installation (optional) -- `--version`: Specific version to install (optional) - - - - -For manual installation or custom configurations: - -```bash -helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \ - --namespace upbound-system \ - --create-namespace \ - --version \ - --set consumerClusterDisplayName= -``` - -### Authentication methods - -API Connector supports two authentication methods: - - - - -For Upbound Spaces integration: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: spaces-secret - namespace: upbound-system -type: Opaque -stringData: - token: - organization: - spacesBaseURL: - controlPlaneGroupName: - controlPlaneName: -``` - - - -For direct cluster access: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: provider-kubeconfig - namespace: upbound-system -type: Opaque -data: - kubeconfig: -``` - - - - -### Connection setup - -Create a `ClusterConnection` to establish connectivity: - - - - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterConnection -metadata: - name: spaces-connection - namespace: upbound-system -spec: - secretRef: - kind: UpboundRobotToken - name: spaces-secret - namespace: upbound-system - crdManagement: - pullBehavior: Pull -``` - - - - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterConnection -metadata: - name: provider-connection - namespace: upbound-system -spec: - secretRef: - kind: KubeConfig - name: provider-kubeconfig - namespace: upbound-system - crdManagement: - pullBehavior: Pull -``` - - - - - - - -### Configuration - -Bind APIs to make them available in your consumer cluster: - -```yaml -apiVersion: 
connect.upbound.io/v1alpha1 -kind: ClusterAPIBinding -metadata: - name: -spec: - connectionRef: - kind: ClusterConnection - name: # Or --name value -``` - - - - -The `ClusterAPIBinding` name must match the **Resource.Group** (name of the CustomResourceDefinition) of the CRD you want to bind. - - - - -## Usage example - -After configuration, you can create API objects (in the consumer cluster) to -synchronize to the provider cluster: - -```yaml -apiVersion: nop.example.org/v1alpha1 -kind: NopResource -metadata: - name: my-resource - namespace: default -spec: - coolField: "Synchronized resource" - compositeDeletePolicy: Foreground -``` - -Verify the resource status: - -```bash -kubectl get nopresource my-resource -o yaml - -``` -When the `APIBound=True` condition is present, it means that the API object -synced to the provider cluster, where reconciliation occurs. Whenever the -API object in the provider cluster gets status updates (for example -`Ready=True`), that status syncs back to the consumer cluster. - - -Switch contexts to the provider cluster to see the API object being created: - -```bash -up ctx -# or kubectl config set-context -``` - -```bash -kubectl get nopresource my-resource -o yaml -``` - -Note that in the provider cluster, the API object is labeled with information on -where the API object originates from, and `connect.upbound.io/managed=true`. 
- -## Monitoring and troubleshooting - -### Check connection status - -```bash -kubectl get clusterconnection -``` - -Expected output: -``` -NAME STATUS MESSAGE -spaces-connection Ready Provider controlplane is available -``` - -### View available APIs - -```bash -kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}' -``` - -### Check API binding status - -```bash -kubectl get clusterapibinding -``` - -### Debug resource synchronization - -```bash -kubectl describe -``` - -## Removal - -### Using the up CLI - -```bash -up controlplane api-connector uninstall \ - --consumer-kubeconfig ~/.kube/config \ - --all -``` - -The `--all` flag removes all resources including connections and secrets. -Without the flag, runtime related resources persist. - -:::note -Uninstall doesn't remove any API objects in the provider control plane. -To clean all API objects, delete all API objects from the consumer -cluster before API connector uninstallation, and wait for the objects to get -deleted. -::: - - -### Using Helm - -```bash -helm uninstall api-connector -n upbound-system -``` - - -## Limitations - - -- **Preview feature**: Subject to breaking changes. Not yet production grade. -- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/). -- **Network requirements**: Consumer cluster must have direct network access to provider cluster. -- **Wide permissions needed in consumer cluster**: Because the API connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster. - -- **Connector polling**: API Connector checks for drift between the consumer and provider cluster - periodically through polling. 
The poll interval can be changed with the `pollInterval` Helm value. - - -## Advanced configuration - -### Multiple connections - -You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations. - -[contact]: https://www.upbound.io/contact-us diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/attach-detach.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/attach-detach.md deleted file mode 100644 index bd5c1c151..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/attach-detach.md +++ /dev/null @@ -1,242 +0,0 @@ ---- -title: Connect or disconnect a Space -sidebar_position: 12 -description: Enable and connect self-hosted Spaces to the Upbound console ---- - -:::important -This feature is in preview. You must -deploy and [enable the Query API][enable-the-query-api] and [enable Upbound -RBAC][enable-upbound-rbac] to connect a Space to Upbound. -::: - -[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console. - -## Usage - -### Connect - -Before you begin, make sure you have: - -- An existing Upbound [organization][organization] in Upbound SaaS. -- The `up` CLI installed and logged into your organization -- `kubectl` installed with the kubecontext of your self-hosted Space cluster. -- A `token.json` license, provided by your Upbound account representative. -- You enabled the [Query API][query-api] in the self-hosted Space. - -Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you: - -```ini -export UPBOUND_SPACE_NAME=your-self-hosted-space -``` - -#### With up CLI - -:::tip -The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a ` before trying to connect the Space. 
-::: - -Connect the Space to the Console: - -```bash -up space connect "${UPBOUND_SPACE_NAME}" -``` - -This command installs a Connect agent, creates a service account, and configures permissions in your Upbound cloud organization in the `upbound-system` namespace of your Space. - -#### With Helm - -Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound. - -```ini -export UPBOUND_ORG_NAME=your-org-name -``` - -Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`: - -```bash -up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect" -export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token') -``` - -:::note -Follow the [`jq` installation guide][jq-install] if your machine doesn't include -it by default. -::: - -Create a secret containing the robot token: - -```bash -kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN} -``` - -Specify your username and password for the helm OCI registry: - -```bash -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin -``` - -In the same cluster where you installed the Spaces software, install the Upbound connect agent with your token secret. 
- -```bash -helm -n upbound-system upgrade --install agent \ - oci://xpkg.upbound.io/spaces-artifacts/agent \ - --version "0.0.0-1116.g14cbfe6" \ - --set "global.space=${UPBOUND_SPACE_NAME}" \ - --set "global.organization=${UPBOUND_ORG_NAME}" \ - --set "global.tokenSecret=connect-token" \ - --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ - --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ - --set "registration.enabled=true" \ - --set "imagePullSecrets[0].name=upbound-pull-secret" \ - --set "billing.enabled=false" \ - --wait -``` - -#### Use an HTTP proxy - -The Connect agent respects `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` for outbound traffic to Upbound. Set them on the agent with `extraEnv` and on the registration init container with `registration.extraEnv`. If you use Zscaler or a similar corporate forward proxy, use the same pattern for both as shown below. - -| Variable | Description | -| --- | --- | -| `HTTP_PROXY` | HTTP proxy URL | -| `HTTPS_PROXY` | HTTPS proxy URL | -| `NO_PROXY` | Comma-separated hosts that bypass the proxy | - -Example `agent-proxy-values.yaml`: - -```yaml -extraEnv: - - name: HTTP_PROXY - value: "http://proxy.example.com:8080" - - name: HTTPS_PROXY - value: "http://proxy.example.com:8080" - - name: NO_PROXY - value: "10.0.0.0/8,.svc.cluster.local,localhost,127.0.0.1" -registration: - extraEnv: - - name: HTTP_PROXY - value: "http://proxy.example.com:8080" - - name: HTTPS_PROXY - value: "http://proxy.example.com:8080" - - name: NO_PROXY - value: "10.0.0.0/8,.svc.cluster.local,localhost,127.0.0.1" -``` - -Install or upgrade using that file: - -```bash -helm -n upbound-system upgrade --install agent \ - oci://xpkg.upbound.io/spaces-artifacts/agent \ - --version "0.0.0-1116.g14cbfe6" \ - --set "global.space=${UPBOUND_SPACE_NAME}" \ - --set "global.organization=${UPBOUND_ORG_NAME}" \ - --set "global.tokenSecret=connect-token" \ - --set 
"image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ - --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ - --set "registration.enabled=true" \ - --set "imagePullSecrets[0].name=upbound-pull-secret" \ - --set "billing.enabled=false" \ - -f agent-proxy-values.yaml \ - --wait -``` - - -#### View your Space in the Console - - -Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown. - -![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png) - -:::note -You can only connect a self-hosted Space to a single organization at a time. -::: - -### Disconnect - -#### With up CLI - -To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command: - -```bash -up space disconnect "${UPBOUND_SPACE_NAME}" -``` - -If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions. - -#### With Helm - -To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command: - -```bash -helm delete -n upbound-system agent -``` - -Clean up the robot token you created for this self-hosted Space: - -```bash -up robot delete "${UPBOUND_SPACE_NAME}" --force -``` - -## Security model - -### Architecture - -![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg) - -:::note -This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments. -::: - -### Data path - -Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global -console and your self-hosted Space. Self-hosted Spaces establishes a secure -connection with `connect.upbound.io`. `api.upbound.io`, and `auth.upbound.io` and subscribes to an -endpoint. 
- -:::important -Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of -allowed endpoints. -::: - -The -Upbound Console communicates to the Space through that endpoint. The data flow -is: - -1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO. -2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane. These requests post as messages to the Upbound Connect service. -3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of the message, and fulfills the request contained. -4. A user's self-hosted Space returns the results of the request to the Upbound Connect service and the Console renders the results in the user's browser session. - -**Upbound never stores data originated from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI. - -### Data transmitted - -Users interact with the Upbound Console to generate request queries to the Upbound Connect Service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including: - -* Metadata for the Space -* Metadata for control planes in the state -* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane Composite Functions. - -:::important -This data only concerns resource configuration. The data _inside_ the managed -resource in your Space isn't visible at any point. 
-::: - -**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space. - -### Threat vectors - -Only users with editor or administrative permissions can make changes using the Console like creating or deleting control planes or groups. - - -[enable-the-query-api]: /self-hosted-spaces/howtos/query-api -[enable-upbound-rbac]: /manuals/platform/rbac -[upbound]: /manuals/console/upbound-console -[organization]: /manuals/platform/organizations -[query-api]: /self-hosted-spaces/howtos/query-api -[jq-install]: https://jqlang.org/download/ - -[upbound-console]: https://console.upbound.io diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/auto-upgrade.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/auto-upgrade.md deleted file mode 100644 index 9b6f14b0c..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/auto-upgrade.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Automatically upgrade control planes -sidebar_position: 50 -description: How to configure automatic upgrades of Crossplane in a control plane -plan: "standard" ---- - - - -Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below. - - -| Channel | Description | Example | -|------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ | -| **Patch** | Upgrades to the latest supported patch release. 
| _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ | -| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_ where N is the latest supported minor version. | _If latest supported minor version is 1.14, auto upgrades to latest patch - 1.13.2-up.3_ | -| **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of minor version. 1.14 upgrades to 1.14.5-up.1_ | - - -:::warning - - -The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes. - - -::: - -## Examples - -The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification. - -To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: example-ctp -spec: - crossplane: - autoUpgrade: - channel: Rapid - writeConnectionSecretToRef: - name: kubeconfig-example-ctp -``` - -To run a control plane with a pinned version of Crossplane, specify in the `version` field: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: example-ctp -spec: - crossplane: - version: 1.14.3-up.1 - autoUpgrade: - channel: None - writeConnectionSecretToRef: - name: kubeconfig-example-ctp -``` - -## Supported Crossplane versions - -Spaces supports the three [preceding minor versions][preceding-minor-versions] from the last supported minor version. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes. 
- -Current Crossplane version support by Spaces version: - -| Spaces Version | Crossplane Version Min | Crossplane Version Max | -|:--------------:|:----------------------:|:----------------------:| -| 1.2 | 1.13 | 1.15 | -| 1.3 | 1.13 | 1.15 | -| 1.4 | 1.14 | 1.16 | -| 1.5 | 1.14 | 1.16 | -| 1.6 | 1.14 | 1.16 | -| 1.7 | 1.14 | 1.16 | -| 1.8 | 1.15 | 1.17 | -| 1.9 | 1.16 | 1.18 | -| 1.10 | 1.16 | 1.18 | -| 1.11 | 1.16 | 1.18 | -| 1.12 | 1.17 | 1.19 | - - -Upbound offers extended support for all installed Crossplane versions released within a 12 month window since the last Spaces release. Contact your Upbound sales representative for more information on version support. - - -:::warning - -If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version always remains within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may be outside the support window. You are responsible for upgrading to a supported version - -::: - -To view the support status of a control plane instance, use `kubectl get ctp`. - -```bash -kubectl get ctp -NAME CROSSPLANE VERSION SUPPORTED READY MESSAGE AGE -example-ctp 1.13.2-up.3 True True 31m - -``` - -Unsupported versions return `SUPPORTED: False`. - -```bash -kubectl get ctp -NAME CROSSPLANE VERSION SUPPORTED READY MESSAGE AGE -example-ctp 1.11.5-up.1 False True 31m - -``` - -For more information, use the `-o yaml` flag to return more information. - -```bash -kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml -status: -conditions: -... -- lastTransitionTime: "2024-01-23T06:36:10Z" - message: Crossplane version 1.11.5-up.1 is outside of the support window. - Oldest supported minor version is 1.12. 
- reason: UnsupportedCrossplaneVersion - status: "False" - type: Supported -``` - - -[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/automation-and-gitops/_category_.json b/self-hosted-spaces_versioned_docs/version-1.16/howtos/automation-and-gitops/_category_.json deleted file mode 100644 index b65481af6..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/automation-and-gitops/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Automation & GitOps", - "position": 11, - "collapsed": true, - "customProps": { - "plan": "business" - } -} diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/automation-and-gitops/overview.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/automation-and-gitops/overview.md deleted file mode 100644 index bd8f8e728..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/automation-and-gitops/overview.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: Automation and GitOps Overview -sidebar_label: Overview -sidebar_position: 1 -description: Guide to automating control plane deployments with GitOps and Argo CD -plan: "business" ---- - -Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools. - - -## What is GitOps? - -GitOps is an approach for managing infrastructure by: -- **Declaratively describing** desired system state in Git -- **Using controllers** to continuously reconcile actual state with desired state -- **Treating Git as the source of truth** for all configuration and deployments - -Upbound control planes are fully compatible with GitOps patterns and we strongly recommend integrating GitOps in the platforms you build on Upbound. 
- -## Key Concepts - -### Argo CD -[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected. - -### Deployment Models - -The way you configure GitOps depends on your deployment model: - -| Aspect | Cloud Spaces | Self-Hosted Spaces | |--------|--------------|-------------------| | **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) | | **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets | | **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) | | **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes | - -## Getting Started - -**Choose your path based on your deployment model:** - -### Cloud Spaces -If you're using Upbound Cloud Spaces (Dedicated or Managed): -1. Start with [GitOps with Upbound Control Planes](/cloud-spaces/howtos/gitops-on-upbound/) -2. Learn how to integrate Argo CD with Cloud Spaces -3. Manage both control plane infrastructure and Upbound resources declaratively - -### Self-Hosted Spaces -If you're running self-hosted Spaces: -1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../gitops.md) -2. Learn how to configure control plane connection secrets -3. Manage workloads deployed to your control planes - -## Common Workflows - -### Workflow 1: Managing Control Planes with GitOps -Create and manage control planes themselves declaratively using provider-kubernetes: - -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha2 -kind: Object -metadata: - name: my-controlplane -spec: - forProvider: - manifest: - apiVersion: spaces.upbound.io/v1beta1 - kind: ControlPlane - # ... 
control plane configuration -``` - -### Workflow 2: Managing Workloads on Control Planes -Deploy applications and resources to control planes using standard Kubernetes GitOps patterns: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: my-app ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: my-app - namespace: my-app -# ... deployment configuration -``` - -### Workflow 3: Managing Upbound Resources -Use provider-upbound to manage Upbound IAM and repository resources: - -- Teams -- Robots and their team memberships -- Repositories and permissions - -## Advanced Topics - -### Argo CD Plugin for Upbound -Learn more in the [ArgoCD Plugin guide](../use-argo.md) for enhanced integration with self-hosted Spaces. - -### Declarative Control Plane Creation -See [Declaratively create control planes](../declarative-ctps.md) for advanced automation patterns. - -### Consuming Control Plane APIs -Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD. - -## Prerequisites - -Before implementing GitOps with control planes, ensure you have: - -**For Cloud Spaces:** -- Access to Upbound Cloud Spaces -- `up` CLI installed and configured -- API token with appropriate permissions -- Argo CD or similar GitOps controller running -- Familiarity with Kubernetes RBAC - -**For Self-Hosted Spaces:** -- Self-hosted Spaces deployed and running -- Argo CD deployed in your infrastructure -- Kubectl access to the cluster hosting Spaces -- Understanding of control plane architecture - -## Next Steps - -1. **Choose your deployment model** above -2. **Review the relevant getting started guide** -3. **Set up your GitOps controller** (Argo CD) -4. **Deploy your first automated control plane** -5. **Explore advanced topics** as needed - -:::tip -Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them. 
-::: diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/backup-and-restore.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/backup-and-restore.md deleted file mode 100644 index 00fbfee57..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/backup-and-restore.md +++ /dev/null @@ -1,516 +0,0 @@ ---- -title: Backup and restore -sidebar_position: 13 -description: Configure and manage backups in your Upbound Space. -plan: "enterprise" ---- - - - -Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios. - - -## Benefits - -The Shared Backups feature provides the following benefits: - -* Automatic backups for control planes without any operational overhead -* Backup schedules for multiple control planes in a group -* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected or Cloud Spaces) - - -## Configure a Shared Backup Config - - -[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create them in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SharedBackupConfig to tell it where store the snapshot. - - -### Backup config provider - - -The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configures: - -* The object storage provider -* The path to the provider -* The credentials needed to communicate with the provider - -You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers. 
- - -`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` overrides the required values in the config. - - - -#### AWS as a storage provider - -:::important -For Cloud Spaces, static credentials are currently the only supported auth method. -::: - -This example demonstrates how to use AWS as a storage provider for your backups: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: AWS - bucket: spaces-backup-bucket - config: - endpoint: s3.eu-west-2.amazonaws.com - region: eu-west-2 - credentials: - source: Secret - secretRef: - name: bucket-creds - key: creds -``` - - -This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret of the same namespace as the Shared Backup Config. - -#### Azure as a storage provider - -:::important -For Cloud Spaces, static credentials are currently the only supported auth method. -::: - -This example demonstrates how to use Azure as a storage provider for your backups: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: Azure - bucket: upbound-backups - config: - storage_account: upbackupstore - container: upbound-backups - endpoint: blob.core.windows.net - credentials: - source: Secret - secretRef: - name: bucket-creds - key: creds -``` - - -This example assumes you've already created an Azure storage account called `upbackupstore` and blob `upbound-backups`. The storage account key to access the blob should exist in a secret of the same namespace as the Shared Backup Config. 
- - -#### GCP as a storage provider - -:::important -For Cloud Spaces, static credentials are currently the only supported auth method. -::: - -This example demonstrates how to use Google Cloud Storage as a storage provider for your backups: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: GCP - bucket: spaces-backup-bucket - credentials: - source: Secret - secretRef: - name: bucket-creds - key: creds -``` - - -This example assumes you've already created a Cloud bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret of the same namespace as the Shared Backup Config. - - -## Configure a Shared Backup Schedule - - -[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create them in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group. - -Below is an example of a Shared Backup Schedule that takes backups every day of all control planes having `environment: production` labels: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule - namespace: default -spec: - schedule: "@daily" - configRef: - kind: SharedBackupConfig - name: default - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -### Define a schedule - -The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below: - - -| Entry | Description | -| ----------------- | ------------------------------------------------------------------------------------------------- | -| `@hourly` | Run once an hour. | -| `@daily` | Run once a day. | -| `@weekly` | Run once a week. | -| `0 0/4 * * *` | Run every 4 hours. 
| -| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. | -| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. | - - -### Exclude resources from the backup - -The `spec.excludedResources` field is an array of resource names to exclude from each backup. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule -spec: - excludedResources: - - "xclusters.aws.platformref.upbound.io" - - "xdatabase.aws.platformref.upbound.io" - - "xrolepolicyattachment.iam.aws.crossplane.io" -``` - -:::warning -You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. -::: - -### Suspend a schedule - -Use `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule -spec: - suspend: true -``` - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule -spec: - ttl: 168h # Backup is garbage collected after 7 days -``` -:::tip -By default, this setting doesn't delete uploaded files. Review the next section to define -the deletion policy. -::: - -### Define the deletion policy - -Set the `spec.deletionPolicy` to define backup deletion actions, including the -deletion of the backup file from the bucket. The Deletion Policy value defaults -to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. 
For more -information on the backup and restore process, review the [Spaces API -documentation][spaces-api-documentation]. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule -spec: - ttl: 168h # Backup is garbage collected after 7 days - deletionPolicy: Delete # Defaults to Orphan -``` - -### Garbage collect backups when the schedule gets deleted - -Set the `spec.useOwnerReferencesInBackup` to garbage collect associated backups when a shared schedule gets deleted. If set to true, backups are garbage collected when the schedule gets deleted. - -### Control plane selection - -To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match. - -This example matches all control planes in the group that have `environment: production` as a label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: my-backup-schedule -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -You can use the more complex `matchExpressions` to match labels based on an expression. 
This example matches control planes that have label `environment: production` or `environment: staging`: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: my-backup-schedule -spec: - controlPlaneSelector: - labelSelectors: - - matchExpressions: - - { key: environment, operator: In, values: [production,staging] } -``` - -You can also specify the names of control planes directly: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: my-backup-schedule -spec: - controlPlaneSelector: - names: - - controlplane-dev - - controlplane-staging - - controlplane-prod -``` - - -## Configure a Shared Backup - - - -[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create them in a group containing one or more control planes. This resource causes a backups to occur for control planes within its corresponding group. - -Below is an example of a Shared Backup that takes a backup of all control planes having `environment: production` labels: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup - namespace: default -spec: - configRef: - kind: SharedBackupConfig - name: default - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -### Exclude resources from the backup - -The `spec.excludedResources` field is an array of resource names to exclude from each backup. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - excludedResources: - - "xclusters.aws.platformref.upbound.io" - - "xdatabase.aws.platformref.upbound.io" - - "xrolepolicyattachment.iam.aws.crossplane.io" -``` - -:::warning -You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. 
-::: - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days -``` - - -### Garbage collect backups on Shared Backup deletion - - - -Set the `spec.useOwnerReferencesInBackup` to define whether to garbage collect associated backups when a shared backup gets deleted. If set to true, backups are garbage collected when the shared backup gets deleted. - -### Control plane selection - -To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match. - -This example matches all control planes in the group that have `environment: production` as a label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -You can use the more complex `matchExpressions` to match labels based on an expression. 
This example matches control planes that have label `environment: production` or `environment: staging`: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - controlPlaneSelector: - labelSelectors: - - matchExpressions: - - { key: environment, operator: In, values: [production,staging] } -``` - -You can also specify the names of control planes directly: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - controlPlaneSelector: - names: - - controlplane-dev - - controlplane-staging - - controlplane-prod -``` - -## Create a manual backup - -[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control planes in its corresponding group. - -Below is an example of a manual Backup of a control plane: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup - namespace: default -spec: - configRef: - kind: SharedBackupConfig - name: default - controlPlane: my-awesome-ctp - deletionPolicy: Delete -``` - -The backup specification `DeletionPolicy` defines backup deletion actions, -including the deletion of the backup file from the bucket. The `Deletion Policy` -value defaults to `Orphan`. Set it to `Delete` to remove uploaded files -in the bucket. -For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1]. - - -### Choose a control plane to backup - -The `spec.controlPlane` field defines which control plane to execute a backup against. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup - namespace: default -spec: - controlPlane: my-awesome-ctp -``` - -If the control plane doesn't exist, the backup fails after multiple failed retry attempts. 
- -### Exclude resources from the backup - -The `spec.excludedResources` field is an array of resource names to exclude from the manual backup. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup -spec: - excludedResources: - - "xclusters.aws.platformref.upbound.io" - - "xdatabase.aws.platformref.upbound.io" - - "xrolepolicyattachment.iam.aws.crossplane.io" -``` - -:::warning -You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. -::: - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days -``` - -## Restore a control plane from a backup - -You can restore a control plane's state from a backup. 
Below is an example of creating a new control plane from a previous backup called `restore-me`: - - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: my-awesome-restored-ctp - namespace: default -spec: - restore: - source: - kind: Backup - name: restore-me -``` - - -[group-scoped]: /self-hosted-spaces/concepts/groups -[group-scoped-1]: /self-hosted-spaces/concepts/groups -[group-scoped-2]: /self-hosted-spaces/concepts/groups -[group-scoped-3]: /self-hosted-spaces/concepts/groups -[sharedbackupconfig]: /self-hosted-spaces/reference/ -[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/ -[sharedbackupschedule]: /self-hosted-spaces/reference/ -[cron-formatted]: https://en.wikipedia.org/wiki/Cron -[spaces-api-documentation]: /self-hosted-spaces/reference/ -[sharedbackup]: /self-hosted-spaces/reference/ -[backup]: /self-hosted-spaces/reference/ -[spaces-api-documentation-1]: /self-hosted-spaces/reference/ - - - diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/billing.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/billing.md deleted file mode 100644 index 15b48cfba..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/billing.md +++ /dev/null @@ -1,302 +0,0 @@ ---- -title: Self-Hosted Space Billing -sidebar_position: 50 -description: A guide for how billing works in an Upbound Space ---- - - -Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`. - - -:::info -This guide describes the traditional usage-based billing model using object storage. 
For disconnected or air-gapped environments, consider [Capacity Licensing](/self-hosted-spaces/howtos/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking. -::: - -## Billing details - -Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens expressly locally within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage software every ~15 seconds. - -Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more. - - - -## AWS S3 - - - -Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one. - -### IAM policy - -You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed -roles). - -The policy example below enables the necessary S3 permissions: - -```json -{ - "Sid":"EnableS3Permissions", - "Effect":"Allow", - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:ListBucket", - "s3:DeleteObject" - ], - "Resource": [ - "arn:aws:s3:::your-bucket-name/*", - "arn:aws:s3:::your-bucket-name" - ] -}, -{ - "Sid": "ListBuckets", - "Effect": "Allow", - "Action": "s3:ListAllMyBuckets", - "Resource": "*" -} -``` - -### Authentication with static credentials - -In your Spaces install cluster, create a secret in the `upbound-system` -namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. 
- -```bash -kubectl create secret generic billing-credentials -n upbound-system \ - --from-literal=AWS_ACCESS_KEY_ID= \ - --from-literal=AWS_SECRET_ACCESS_KEY= -``` - -Install the Space software, providing the billing details to the other required values. - - - - - - -```bash {hl_lines="2-6"} -helm -n upbound-system upgrade --install spaces ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=aws" \ - --set "billing.storage.aws.region=" \ - --set "billing.storage.aws.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -```bash {hl_lines="2-6"} -up space init ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=aws" \ - --set "billing.storage.aws.region=" \ - --set "billing.storage.aws.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - - - -### Authentication with an IAM role - - -To use short-lived credentials with an assumed IAM role, create an IAM role with -established trust to the `vector`-serviceaccount in all `mxp-*-system` -namespaces. - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringLike": { - "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector" - } - } - } - ] -} -``` - -For more information about workload identities, review the [Workload-identity -Configuration documentation][workload-identity-configuration-documentation] - - - - - - -```bash {hl_lines="2-7"} -helm -n upbound-system upgrade --install spaces ... 
\ - --set "billing.enabled=true" \ - --set "billing.storage.provider=aws" \ - --set "billing.storage.aws.region=" \ - --set "billing.storage.aws.bucket=" \ - --set "billing.storage.secretRef.name=" \ - --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" - ... -``` - - - - - -```bash {hl_lines="2-7"} -up space init ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=aws" \ - --set "billing.storage.aws.region=" \ - --set "billing.storage.aws.bucket=" \ - --set "billing.storage.secretRef.name=" \ - --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" - ... -``` - - - - - - -*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role. - - -## Azure blob storage - -Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one. - -Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account. - -```bash -kubectl create secret generic billing-credentials -n upbound-system \ - --from-literal=AZURE_TENANT_ID= \ - --from-literal=AZURE_CLIENT_ID= \ - --from-literal=AZURE_CLIENT_SECRET= -``` - -Install the Space software, providing the billing details to the other required values. - - - - - - -```bash {hl_lines="2-6"} -helm -n upbound-system upgrade --install spaces ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=azure" \ - --set "billing.storage.azure.storageAccount=" \ - --set "billing.storage.azure.container=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -```bash {hl_lines="2-6"} -up space init ... 
\ - --set "billing.enabled=true" \ - --set "billing.storage.provider=azure" \ - --set "billing.storage.azure.storageAccount=" \ - --set "billing.storage.azure.container=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - - - -## GCP Cloud Storage Buckets - - -Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one. - -Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account. - -```bash -kubectl create secret generic billing-credentials -n upbound-system \ - --from-literal=google_application_credentials= -``` - -Install the Space software, providing the billing details to the other required values. - - - - - - -```bash {hl_lines="2-5"} -helm -n upbound-system upgrade --install spaces ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=gcp" \ - --set "billing.storage.gcp.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -```bash {hl_lines="2-5"} -up space init ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=gcp" \ - --set "billing.storage.gcp.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -## Export billing data to send to Upbound - -To prepare the billing data to send to Upbound, do the following: - -Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command. - - -:::important -Your current CLI must have read access to the bucket to run this command. 
-::: - - -The example below exports billing data stored in AWS: - -```bash -up space billing export --provider=aws \ - --bucket=spaces-billing-bucket \ - --account=your-upbound-org \ - --billing-month=2024-07 \ - --force-incomplete -``` - -The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative. - - -You can find full instructions and command options in the up [CLI reference][cli-reference] docs. - - -[export]: /reference/cli-reference -[cli-reference]: /reference/cli-reference -[flagship-product]: https://www.upbound.io/platform -[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/capacity-licensing.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/capacity-licensing.md deleted file mode 100644 index d5230493f..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/capacity-licensing.md +++ /dev/null @@ -1,593 +0,0 @@ ---- -title: Capacity Licensing -sidebar_position: 60 -description: A guide for capacity-based licensing in self-hosted Spaces -plan: "enterprise" ---- - - - - - -This guide explains how to configure and monitor capacity-based licensing in -self-hosted Upbound Spaces. Capacity licensing provides a simplified billing -model for disconnected or air-gapped environments where automated usage -reporting isn't possible. - -:::info -Spaces `v1.15` and later support Capacity Licensing as an -alternative to the traditional usage-based billing model described in the -[Self-Hosted Space Billing][space-billing] guide. -::: - - -## Overview - - -Capacity licensing allows organizations to purchase a fixed capacity of -resources upfront. The Spaces software tracks usage locally and provides -visibility into consumption against your purchased capacity, all without -requiring external connectivity to Upbound's services. 
- -### Key concepts - -- **Resource Hours**: The primary billing unit representing all resources - managed by Crossplane over time. This includes managed resources, - composites (XRs), claims (XRCs), and all composed resources - essentially - everything Crossplane manages. The system aggregates resource counts over each - hour using trapezoidal integration to accurately account for changes in - resource count throughout the hour. -- **Operations**: The number of Operations invoked by Crossplane. -- **License Capacity**: The total amount of resource hours and operations included in your license. -- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. - -### How it works - -1. Upbound provides you with a license file containing your purchased capacity -2. You configure a `SpaceLicense` in your Spaces cluster -3. The metering system automatically: - - Collects measurements from all control planes every minute - - Aggregates usage data into hourly intervals - - Stores usage data in a local PostgreSQL database - - Updates the `SpaceLicense` status with current consumption - -## Prerequisites - -### PostgreSQL database - -Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: - -- An existing PostgreSQL instance -- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) -- A PostgreSQL instance deployed in your cluster - -The database must be: - -- Accessible from the Spaces cluster -- Configured with a dedicated database and credentials - -#### Example: Deploy PostgreSQL with CloudNativePG - -If you don't have an existing PostgreSQL instance, you can deploy one in your -cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that -manages PostgreSQL clusters. - -1. Install the CloudNativePG operator: - -```bash -kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml -``` - -2. 
Create a PostgreSQL cluster for metering: - -```yaml -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -metadata: - name: metering-postgres - namespace: upbound-system -spec: - instances: 1 - imageName: ghcr.io/cloudnative-pg/postgresql:16 - bootstrap: - initdb: - database: metering - owner: metering - postInitApplicationSQL: - - ALTER ROLE "metering" CREATEROLE; - storage: - size: 5Gi - # Optional: Configure resources for production use - # resources: - # requests: - # memory: "512Mi" - # cpu: "500m" - # limits: - # memory: "1Gi" - # cpu: "1000m" ---- -apiVersion: v1 -kind: Secret -metadata: - name: metering-postgres-app - namespace: upbound-system - labels: - cnpg.io/reload: "true" -stringData: - username: metering - password: "your-secure-password-here" -type: kubernetes.io/basic-auth -``` - -```bash -kubectl apply -f metering-postgres.yaml -``` - -3. Wait for the cluster to be ready: - -```bash -kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m -``` - -4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. - -:::tip -For production deployments, consider: -- Increasing `instances` to 3 for high availability -- Configuring [backups] to object storage -- Setting appropriate resource requests and limits -- Using a dedicated storage class with good I/O performance -::: - -### License file - -Contact your Upbound sales representative to obtain a license file for your organization. 
The license file contains:
-- Your unique license ID
-- Purchased capacity (resource hours and operations)
-- License validity period
-- Any usage restrictions (such as cluster UUID pinning)
-
-## Configuration
-
-### Step 1: Create database credentials secret
-
-Create a Kubernetes secret containing your PostgreSQL password using the pgpass format:
-
-```bash
-# Create a pgpass file with format: hostname:port:database:username:password
-# Note: The database name and username must be 'metering'
-# For CNPG clusters, use the read-write service endpoint: <cluster-name>-rw.<namespace>.svc.cluster.local
-echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass
-
-# Create the secret
-kubectl create secret generic metering-postgres-credentials \
-  -n upbound-system \
-  --from-file=pgpass=pgpass
-
-# Clean up the pgpass file
-rm pgpass
-```
-
-The secret must contain a single key:
-- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password`
-
-:::note
-The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions.
-
-If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret.
-:::
-
-:::tip
-For production environments, consider using external secret management solutions:
-- [External Secrets Operator][eso]
-- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager)
-:::
-
-### Step 2: Enable metering in Spaces
-
-Enable the metering feature when installing or upgrading Spaces:
-
-
-
-
-
-```bash {hl_lines="2-7"}
-helm -n upbound-system upgrade --install spaces ... 
\ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -```bash {hl_lines="2-7"} -up space init ... \ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -#### Configuration options - -| Option | Default | Description | -|--------|---------|-------------| -| `metering.enabled` | `false` | Enable the metering feature | -| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | -| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | -| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | -| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | -| `metering.interval` | `1m` | How often to collect measurements from control planes | -| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | -| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | -| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = 
indefinite) | - - -#### Database sizing and retention - -The metering system uses two PostgreSQL tables to track usage: - -**Raw measurements table** (`measurements`): -- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) -- One row per control plane per interval -- Affected by the `measurementRetentionDays` setting -- Used for detailed auditing and troubleshooting - -**Aggregated usage table** (`hourly_usage`): -- Stores hourly aggregated resource hours and operations per license -- One row per hour per license -- Never deleted (required for accurate license tracking) -- Grows much slower than raw measurements - -##### Storage sizing guidelines - -Estimate your PostgreSQL storage needs based on these factors: - - -| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | -|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| -| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | -| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | -| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | -| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | - -The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
- -**Formula for custom calculations**: -``` -Daily measurements per control plane = (24 * 60) / interval_minutes -Total rows = control_planes × daily_measurements × retention_days -Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) -``` - -##### Retention behavior - -The `measurementRetentionDays` setting controls retention of raw measurement data: - -- **Default: 30 days** - Balances audit capabilities with storage efficiency -- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely -- **Cleanup runs**: Every aggregation interval (default: hourly) -- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) -- **What's cleaned up**: Raw point-in-time measurements older than retention period - -**Recommendations**: -- **30 days**: For most troubleshooting and short-term auditing -- **60 to 90 days**: For environments requiring extended audit trails -- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements - -:::note -Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. -::: - -### Step 3: Apply your license - -Use the `up` CLI to apply your license file: - -```bash -up space license apply /path/to/license.json -``` - -This command automatically: -- Creates a secret containing your license file in the `upbound-system` namespace -- Creates the `SpaceLicense` resource configured to use that secret - -:::tip -You can specify a different namespace for the license secret using the `--namespace` flag: -```bash -up space license apply /path/to/license.json --namespace my-namespace -``` -::: - -
-Alternative: Manual kubectl approach - -If you prefer not to use the `up` CLI, you can manually create the resources: - -1. Create the license secret: - -```bash -kubectl create secret generic space-license \ - -n upbound-system \ - --from-file=license.json=/path/to/license.json -``` - -2. Create the SpaceLicense resource: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceLicense -metadata: - name: space -spec: - secretRef: - name: space-license - namespace: upbound-system - key: license.json -``` - -```bash -kubectl apply -f spacelicense.yaml -``` - -:::important -You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. -::: - -
- -## Monitoring usage - -### Check license status - -Use the `up` CLI to view your license details and current usage: - -```bash -up space license show -``` - -Example output: - -``` -Spaces License Status: Valid (License is valid) - -Created: 2024-01-01T00:00:00Z -Expires: 2025-01-01T00:00:00Z - -Plan: enterprise - -Resource Hour Limit: 1000000 -Operation Limit: 500000 - -Enabled Features: -- spaces -- query-api -- backup-restore -``` - -The output shows: -- License validity status and any validation messages -- Creation and expiration dates -- Your commercial plan tier -- Capacity limits for resource hours and operations -- Enabled features in your license -- Any restrictions (such as cluster UUID pinning) - -
-Alternative: View detailed status with kubectl - -For detailed information including usage statistics, use kubectl: - -```bash -kubectl get spacelicense space -o yaml -``` - -Example output showing usage data: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceLicense -metadata: - name: space -spec: - secretRef: - name: space-license - namespace: upbound-system -status: - conditions: - - type: LicenseValid - status: "True" - reason: Valid - message: "License is valid" - id: "lic_abc123xyz" - plan: "enterprise" - capacity: - resourceHours: 1000000 - operations: 500000 - usage: - resourceHours: 245680 - operations: 12543 - resourceHoursUtilization: "24.57%" - operationsUtilization: "2.51%" - firstMeasurement: "2024-01-15T10:00:00Z" - lastMeasurement: "2024-02-10T14:30:00Z" - createdAt: "2024-01-01T00:00:00Z" - expiresAt: "2025-01-01T00:00:00Z" - enabledFeatures: - - "spaces" - - "query-api" - - "backup-restore" -``` - -
- -### Understanding the status fields - -| Field | Description | -|-------|-------------| -| `status.id` | Unique license identifier | -| `status.plan` | Your commercial plan (community, standard, enterprise) | -| `status.capacity` | Total capacity included in your license | -| `status.usage.resourceHours` | Total resource hours consumed | -| `status.usage.operations` | Total operations performed | -| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used | -| `status.usage.operationsUtilization` | Percentage of operations capacity used | -| `status.usage.firstMeasurement` | When usage tracking began | -| `status.usage.lastMeasurement` | Most recent usage update | -| `status.expiresAt` | License expiration date | - -### Monitor with kubectl - -Watch your license utilization in real-time: - -```bash -kubectl get spacelicense space -w -``` - -Short output format: - -``` -NAME PLAN VALID REASON AGE -space enterprise True Valid 45d -``` - -## Managing licenses - -### Updating your license - -To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license: - -```bash -up space license apply /path/to/new-license.json -``` - -This command replaces the existing license secret and updates the SpaceLicense resource. - -### Removing a license - -To remove a license: - -```bash -up space license remove -``` - -This command: -- Prompts for confirmation before proceeding -- Removes the license secret - -To skip the confirmation prompt, use the `--force` flag: - -```bash -up space license remove --force -``` - -## Troubleshooting - -### License not updating - -If the license status doesn't update with usage data: - -1. 
**Check metering controller logs**:
-   ```bash
-   kubectl logs -n upbound-system deployment/spaces-controller -c metering
-   ```
-
-2. **Check if the system captures your measurements**:
-
-   ```bash
-   # Connect to PostgreSQL and query the measurements table
-   kubectl exec -it <postgres-pod> -n upbound-system -- psql -U metering -d metering \
-     -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';"
-   ```
-
-### High utilization warnings
-
-If you're approaching your capacity limits:
-
-1. **Review resource usage** by control plane to identify high consumers
-2. **Contact your Upbound sales representative** to discuss capacity expansion
-3. **Optimize managed resources** by cleaning up unused resources
-
-### License validation failures
-
-If your license shows as invalid:
-
-1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'`
-2. **Verify license file integrity**: Ensure the secret contains valid JSON
-3. **Check for cluster UUID restrictions**: Upbound pins some licenses to
-   specific clusters
-4. **Review controller logs** for detailed error messages
-
-## Differences from traditional billing
-
-### Capacity licensing
-
-- ✅ Works in disconnected environments
-- ✅ Provides real-time usage visibility
-- ✅ No manual data export required
-- ✅ Requires PostgreSQL database
-- ✅ Fixed capacity model
-
-### Traditional billing (object storage)
-
-
-- ❌ Requires periodic manual export
-- ❌ Delayed visibility into usage
-- ✅ Works with S3/Azure Blob/GCS
-- ❌ Requires cloud storage access
-- ✅ Pay-as-you-go model
-
-## Best practices
-
-### Database management
-
-1. **Regular backups**: Back up your metering database regularly to preserve usage history
-2. **Monitor database size**: Set appropriate retention periods to manage storage growth
-3. **Use managed databases**: Consider managed PostgreSQL services for production
-4. **Connection pooling**: Use connection pooling for better performance at scale
-
-### License management
-
-1. 
**Monitor utilization**: Set up alerts before reaching 80% capacity
-2. **Plan renewals early**: Start renewal discussions 60 days before expiration
-3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning
-4. **Secure license files**: Treat license files as sensitive credentials
-
-### Operational monitoring
-
-1. **Set up dashboards**: Create Grafana dashboards for usage trends
-2. **Enable alerting**: Configure alerts for high utilization and expiration
-3. **Regular audits**: Periodically review usage patterns across control planes
-4. **Capacity planning**: Use historical data to predict future capacity needs
-
-## Next steps
-
-- Learn about [Observability] to monitor your Spaces deployment
-- Explore [Backup and Restore][backup-restore] to protect your control plane data
-- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model
-- Contact [Upbound Sales][sales] to discuss capacity licensing options
-
-
-[space-billing]: /self-hosted-spaces/howtos/billing
-[CloudNativePG]: https://cloudnative-pg.io/
-[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/
-[backup-restore]: /self-hosted-spaces/howtos/backup-and-restore
-[sales]: https://www.upbound.io/contact
-[eso]: https://external-secrets.io/
-[Observability]: /self-hosted-spaces/howtos/observability
-
-
diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/certs.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/certs.md
deleted file mode 100644
index e517c250e..000000000
--- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/certs.md
+++ /dev/null
@@ -1,274 +0,0 @@
----
-title: Istio Ingress Gateway With Custom Certificates
-sidebar_position: 20
-description: Install self hosted spaces using istio ingress gateway in a Kind cluster
----
-
-:::important
-Prerequisites
-
-- Spaces Token available in a file
-- `docker login xpkg.upbound.io -u <access-id> -p <token>`
-- [`istioctl`][istioctl] installation
-- `jq` installation
-:::
- 
-This document describes the installation of a self hosted space on an example `kind` -cluster along with Istio Ingress Gateway and certificates. The service mesh and certificates -installation is transferable to self hosted spaces in arbitrary clouds. - -## Create a kind cluster - -```shell -cat < -## Install Istio - - - -:::important -This is an example and not recommended for use in production. -::: - - -1. Create the `istio-values.yaml` file - -```shell -cat > istio-values.yaml << 'EOF' -apiVersion: install.istio.io/v1alpha1 -kind: IstioOperator -spec: - hub: gcr.io/istio-release - components: - ingressGateways: - - enabled: true - name: istio-ingressgateway - k8s: - nodeSelector: - ingress-ready: "true" - overlays: - - apiVersion: apps/v1 - kind: Deployment - name: istio-ingressgateway - patches: - - path: spec.template.spec.containers.[name:istio-proxy].ports - value: - - containerPort: 8080 - hostPort: 80 - - containerPort: 8443 - hostPort: 443 -EOF -``` - -2. Install istio via `istioctl` - -```shell -istioctl install -f istio-values.yaml -``` - -## Create a self-signed Certificate via cert-manager - -:::important -This Certificate manifest creates a self-signed certificate for a proof of concept -environment and isn't recommended for production use cases. -::: - -1. Create the upbound-system namespace - -```shell -kubectl create namespace upbound-system -``` - -2. Create a self-signed certificate - -```shell -cat < -## Create an Istio Gateway and VirtualService - - - - -Configure an Istio Gateway and VirtualService to use TLS passthrough. - - -```shell -cat < spaces-values.yaml << 'EOF' -# Configure spaces-router to use the TLS secret created by cert-manager. 
-externalTLS: - tlsSecret: - name: example-tls-secret - caBundleSecret: - name: example-tls-secret - key: ca.crt -ingress: - provision: false - # Allow Istio Ingress Gateway to communicate to the spaces-router - namespaceLabels: - kubernetes.io/metadata.name: istio-system - podLabels: - app: istio-ingressgateway - istio: ingressgateway -EOF -``` - -2. Set the required environment variables - -```shell -# Update these according to your account/token file -export SPACES_TOKEN_PATH= -export UPBOUND_ACCOUNT= -# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname -export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" -export SPACES_VERSION="1.14.1" -``` - -3. Create an image pull secret for Spaces - -```shell -kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ - --docker-server=https://xpkg.upbound.io \ - --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ - --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" -``` - -4. Install the Spaces helm chart - -```shell -# Login to xpkg.upbound.io -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin - -# Install spaces helm chart -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "authentication.hubIdentities=true" \ - --set "authorization.hubRBAC=true" \ - --wait -f spaces-values.yaml -``` - -## Validate the installation - -Successful access of the `up` command to interact with your self hosted space validates the -certificate installation. - -- `up ctx .` - -You can also issue control plane creation, list and deletion commands. 
- -- `up ctp create cert-test` -- `up ctp list` -- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace` -- `up ctp delete cert-test` - -:::note -If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile]. -::: - -## Troubleshooting - -Examine your certificate with `openssl`: - -```shell -openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts -``` - -[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/ -[up-profile]: /manuals/cli/howtos/profile-config/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/configure-ha.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/configure-ha.md deleted file mode 100644 index af3657b6f..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/configure-ha.md +++ /dev/null @@ -1,445 +0,0 @@ ---- -title: Production Scaling and High Availability -description: Configure your Self-Hosted Space for production -sidebar_position: 5 ---- - - - -This guide explains how to configure an existing Upbound Space deployment for -production operation at scale. - -Use this guide when you're ready to deploy production scaling, high availability, -and monitoring in your Space. 
- - -## Prerequisites - -Before you begin scaling your Spaces deployment, make sure you have: - - -* A working Space deployment -* Cluster administrator access -* An understanding of load patterns and growth in your organization -* A familiarity with node affinity, tainting, and Horizontal Pod Autoscaling - (HPA) - - -## Production scaling strategy - - -In this guide, you will: - - - -* Create dedicated node pools for different component types -* Configure high-availability to ensure there are no single points of failure -* Set dynamic scaling for variable workloads -* Optimize your storage and component operations -* Monitor your deployment health and performance - -## Spaces architecture - -The basic Spaces workflow follows the pattern below: - - -![Spaces workflow][spaces-workflow] - -## Node architecture - -You can mitigate resource contention and improve reliability by separating system -components into dedicated node pools. - -### `etcd` dedicated nodes - -`etcd` performance directly impacts your entire Space, so isolate it for -consistent performance. - -1. Create a dedicated `etcd` node pool - - **Requirements:** - - **Minimum**: 3 nodes for HA - - **Instance type**: General purpose with high network throughput/low latency - - **Storage**: High performance storage (`etcd` is I/O sensitive) - -2. Taint `etcd` nodes to reserve them - - ```bash - kubectl taint nodes target=etcd:NoSchedule - ``` - -3. Configure `etcd` storage - - `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling - documentation][scaling] - for specific storage guidance. - -### API server dedicated nodes - -API servers handle all control plane requests and should run on dedicated -infrastructure. - -1. Create dedicated API server nodes - - **Requirements:** - - **Minimum**: 2 nodes for HA - - **Instance type**: Compute-optimized, memory-optimized, or general-purpose - - **Scaling**: Scale vertically based on API server load patterns - -2. 
Taint API server nodes - - ```bash - kubectl taint nodes target=apiserver:NoSchedule - ``` - -### Configure cluster autoscaling - -Enable cluster autoscaling for all node pools. - -For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for -improved bin-packing and instance type selection. - -For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide. - -For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide. - - -## Configure high availability - -Ensure control plane components can survive node and zone failures. - -### Enable high availability mode - -1. Configure control planes for high availability - - ```yaml - controlPlanes: - ha: - enabled: true - ``` - - This configures control plane pods to run with multiple replicas and - associated pod disruption budgets. - -### Configure component distribution - -1. Set up API server pod distribution - - ```yaml - controlPlanes: - vcluster: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: target - operator: In - values: - - apiserver - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster - topologyKey: "kubernetes.io/hostname" - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster - topologyKey: topology.kubernetes.io/zone - weight: 100 - ``` - -2. 
Configure `etcd` pod distribution - - ```yaml - controlPlanes: - etcd: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: target - operator: In - values: - - etcd - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster-etcd - topologyKey: "kubernetes.io/hostname" - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster-etcd - topologyKey: topology.kubernetes.io/zone - weight: 100 - ``` - -### Configure tolerations - -Allow control plane pods to schedule on the tainted dedicated nodes (available -in Spaces v1.14+). - -1. Add tolerations for `etcd` pods - - ```yaml - controlPlanes: - etcd: - tolerations: - - key: "target" - operator: "Equal" - value: "etcd" - effect: "NoSchedule" - ``` - -2. Add tolerations for API server pods - - ```yaml - controlPlanes: - vcluster: - tolerations: - - key: "target" - operator: "Equal" - value: "apiserver" - effect: "NoSchedule" - ``` - - -## Configure autoscaling for Spaces components - - -Set up the Spaces system components to handle variable load automatically. - -### Scale API and `apollo` services - -1. Configure minimum replicas for availability - - ```yaml - api: - replicaCount: 2 - - features: - alpha: - apollo: - enabled: true - replicaCount: 2 - ``` - - Both services support horizontal and vertical scaling based on load patterns. - -### Configure router autoscaling - -The `spaces-router` is the entry point for all traffic and needs intelligent -scaling. - - -1. Enable Horizontal Pod Autoscaler - - ```yaml - router: - hpa: - enabled: true - minReplicas: 2 - maxReplicas: 8 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 - ``` - -2. 
Monitor scaling factors - - **Router scaling behavior:** - - **Vertical scaling**: Scales based on number of control planes - - **Horizontal scaling**: Scales based on request volume - - **Resource monitoring**: Monitor CPU and memory usage - - - -### Configure controller scaling - -The `spaces-controller` manages Space-level resources and requires vertical -scaling. - -1. Configure adequate resources with headroom - - ```yaml - controller: - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2000m" - memory: "4Gi" - ``` - - **Important**: The controller can spike when reconciling large numbers of - control planes, so provide adequate headroom for resource spikes. - -## Set up production storage - - -### Configure Query API database - - -1. Use a managed PostgreSQL database - - **Recommended services:** - - [AWS RDS][rds] - - [Google Cloud SQL][gke-sql] - - [Azure Database for PostgreSQL][aks-sql] - - **Requirements:** - - Minimum 400 IOPS performance - - -## Monitoring - - - -Monitor key metrics to ensure healthy scaling and identify issues quickly. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -### Control plane health - -Track these `spaces-controller` metrics: - -1. **Total control planes** - - ``` - spaces_control_plane_exists - ``` - - Tracks the total number of control planes in the system. - -2. **Degraded control planes** - - ``` - spaces_control_plane_degraded - ``` - - Returns control planes that don't have a `Synced`, `Ready`, and - `Healthy` state. - -3. **Stuck control planes** - - ``` - spaces_control_plane_stuck - ``` - - Control planes stuck in a provisioning state. - -4. **Deletion issues** - - ``` - spaces_control_plane_deletion_stuck - ``` - - Control planes stuck during deletion. 
- -### Alerting - -Configure alerts for critical scaling and health metrics: - -- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds -- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts - -## Architecture overview - -**Spaces System Components:** - -- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers -- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning -- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router) -- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods - - -**Control Plane Components (per control plane):** -- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL` -- **`XGQL`**: GraphQL API powering console views -- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change) -- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing -- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API - - -### `up ctx` workflow - - - up ctx workflow diagram - - -### Access a control plane API server via kubectl - - - kubectl workflow diagram - - -### Query API/Apollo - - - query API workflow diagram - - -## See also - -* [Upbound Spaces deployment requirements][deployment] -* [Upbound `etcd` scaling resources][scaling] - -[up-ctx-workflow]: /img/up-ctx-workflow.png -[kubectl]: /img/kubectl-workflow.png -[query-api]: /img/query-api-workflow.png -[spaces-workflow]: /img/up-basic-flow.png -[rds]: https://aws.amazon.com/rds/postgresql/ -[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql -[aks-sql]: 
https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk -[deployment]: https://docs.upbound.io/self-hosted-spaces/howtos/deployment-reqs/ -[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html -[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler -[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview -[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/control-plane-topologies.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/control-plane-topologies.md deleted file mode 100644 index d845636b7..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/control-plane-topologies.md +++ /dev/null @@ -1,553 +0,0 @@ ---- -title: Control Plane Topologies -sidebar_position: 15 -description: Configure scheduling of composites to remote control planes ---- - - -:::important -This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, [contact Upbound](https://www.upbound.io/support/contact). -::: - -Upbound's _Control Plane Topology_ feature lets you build and deploy a platform -of multiple control planes. These control planes work together for a unified platform -experience. - - -With the _Topology_ feature, you can install resource APIs that other control planes reconcile and configure the routing between them. You can also build compositions that reference other resources running on your control plane or elsewhere in Upbound. - -This guide explains how to use Control Plane Topology APIs to install, configure -remote APIs, and build powerful compositions that reference other resources. 
- -## Benefits - -The Control Plane Topology feature provides the following benefits: - -* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle. -* Install composite APIs from Configurations as CRDs that other control planes fulfill and reconcile. -* Route APIs to other control planes by configuring an _Environment_ resource, which define a set of routable dimensions. - -## How it works - - -Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that: - -- you don't want your user to ever be able to create a database without specifying a subnet. -- you want to let them create a subnet when they create the database, if it doesn't exist. -- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user. - -In each of these scenarios, you must resort to writing complex composition logic -to handle each case. The problem worsens when the resource exists in a -context separate from the current control plane's context. Imagine a scenario -where one control plane manages Database resources and a second control plane -manages networking resources. With the _Topology_ feature, you can offload these -concerns to Upbound machinery. 
- - -![Control Plane Topology feature arch](/img/topology-arch.png) - -## Prerequisites - -Enable the Control Plane Topology feature in the Space you plan to run your control plane in: - -- Cloud Spaces: Not available yet -- Connected Spaces: Space administrator must enable this feature -- Disconnected Spaces: Space administrator must enable this feature - - - -## Compose resources with _ReferencedObjects_ - - - -_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound. - -:::tip -This feature is useful for composing resources that exist in a -remote context, like another control plane. You can also use -_ReferencedObjects_ to resolve references to any other Kubernetes object -in the current control plane context. This could be a secret, another Crossplane -resource, or more. -::: - -### Declare the resource reference in your XRD - -To compose a _ReferencedObject_, you should start by adding a resource reference -in your Composite Resource Definition (XRD). The convention for the resource -reference follows the shape shown below: - -```yaml -Ref: - type: object - properties: - apiVersion: - type: string - default: "" - enum: [ "" ] - kind: - type: string - default: "" - enum: [ "" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe", "Create", "Update", "Delete", "*" ] - name: - type: string - namespace: - type: string - required: - - name -``` - -The `Ref` should be the kind of resource you want to reference. The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference. - -The `name` and `namespace` strings are inputs that let your users specify the resource instance. - -#### Grants - -The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. 
You can configure which of the available grants you let your user select and which it defaults to. Similar in behavior as [Crossplane management policies][crossplane-management-policies], each grant value does the following: - -- **Observe:** The composite may observe the state of the referenced resource. -- **Create:** The composite may create the referenced resource if it doesn't exist. -- **Update:** The composite may update the referenced resource. -- **Delete:** The composite may delete the referenced resource. -- **\*:** The composite has full control over the referenced resource. - -Here are some examples that show how it looks in practice: - -
- -Show example for defining the reference to another composite resource - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xsqlinstances.database.platform.upbound.io -spec: - type: object - properties: - parameters: - type: object - properties: - networkRef: - type: object - properties: - apiVersion: - type: string - default: "networking.platform.upbound.io" - enum: [ "networking.platform.upbound.io" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe" ] - kind: - type: string - default: "Network" - enum: [ "Network" ] - name: - type: string - namespace: - type: string - required: - - name -``` - -
- - -
-Show example for defining the reference to a secret -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xsqlinstances.database.platform.upbound.io -spec: - type: object - properties: - parameters: - type: object - properties: - secretRef: - type: object - properties: - apiVersion: - type: string - default: "v1" - enum: [ "v1" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe", "Create", "Update", "Delete", "*" ] - kind: - type: string - default: "Secret" - enum: [ "Secret" ] - name: - type: string - namespace: - type: string - required: - - name -``` -
- -### Manually add the jsonPath - -:::important -This step is a known limitation of the preview. Upbound is working on tooling that -removes this requirement. -::: - -During the preview timeframe of this feature, you must add an annotation by hand -to the XRD. In your XRD's `metadata.annotations`, set the -`references.upbound.io/schema` annotation. It should be a JSON string in the -following format: - -```json -{ - "apiVersion": "references.upbound.io/v1alpha1", - "kind": "ReferenceSchema", - "references": [ - { - "jsonPath": ".spec.parameters.secretRef", - "kinds": [ - { - "apiVersion": "v1", - "kind": "Secret" - } - ] - } - ] -} -``` - -Flatten this JSON into a string and set the annotation on your XRD. View the -example below for an illustration: - -
-Show example setting the references.upbound.io/schema annotation -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xthings.networking.acme.com - annotations: - references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' -``` -
- -
-Show example for setting multiples references in the references.upbound.io/schema annotation -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xthings.networking.acme.com - annotations: - references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' -``` -
- - -You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier. - - -### Compose a _ReferencedObject_ - -To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema: - -```yaml -apiVersion: references.upbound.io/v1alpha1 -kind: ReferencedObject -spec: - managementPolicies: - - Observe - deletionPolicy: Orphan - composite: - apiVersion: - kind: - name: - jsonPath: .spec.parameters.secretRef -``` - -The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance. - -The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD. - -
-Show example for composing a resource reference to a secret - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - name: demo-composition -spec: - compositeTypeRef: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - mode: Pipeline - pipeline: - - step: patch-and-transform - functionRef: - name: crossplane-contrib-function-patch-and-transform - input: - apiVersion: pt.fn.crossplane.io/v1beta1 - kind: Resources - resources: - - name: secret-ref-object - base: - apiVersion: references.upbound.io/v1alpha1 - kind: ReferencedObject - spec: - managementPolicies: - - Observe - deletionPolicy: Orphan - composite: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - name: TO_BE_PATCHED - jsonPath: .spec.parameters.secretRef - patches: - - type: FromCompositeFieldPath - fromFieldPath: metadata.name - toFieldPath: spec.composite.name -``` -
- -By declaring a resource reference in your XRD, Upbound handles resolution of the desired resource. - -## Deploy APIs - -To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes. - -### Deploy into a service-level control plane - -Package the APIs you build into a Configuration package and deploy it on a -control plane in an Upbound Space. In Upbound, you call the control plane hosting the Configuration package a **service-level control plane**. This control plane runs the controllers that process the API requests and provision underlying resources. Later sections explain how to use _Topology_ features to [configure routing][configure-routing]. - -### Deploy as Remote APIs on a platform control plane - -Use the same package source as the **service-level -control planes**, but now deploy the Configuration in a separate control -plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes -CustomResourceDefinitions for the APIs defined in the Configuration package, but -no controllers get deployed. - -### Install a _RemoteConfiguration_ - -_RemoteConfiguration_ is a resource type available in an Upbound manage control -planes that acts like a sort of Crossplane [Configuration][configuration] -package. Unlike standard Crossplane Configurations, which install XRDs, -compositions, and functions into a desired control plane, _RemoteConfigurations_ -install only the CRDs for claimable composite resource types. - -#### Install directly - -Install a _RemoteConfiguration_ by defining the following and applying it to -your control plane: - -```yaml -apiVersion: pkg.upbound.io/v1alpha1 -kind: RemoteConfiguration -metadata: - name: -spec: - package: -``` - -#### Declare as a project dependency - -You can declare _RemoteConfigurations_ as dependencies in your control plane's -[project file][project-file]. 
Use the up CLI to add the dependency, providing -the `--remote` flag: - -```tsx live -up dep add --remote -``` - -This command adds a declaration in the `spec.apiDependencies` stanza of your -project's `upbound.yaml` as demonstrated below: - -```yaml -apiVersion: meta.dev.upbound.io/v1alpha1 -kind: Project -metadata: - name: service-controlplane -spec: - apiDependencies: - - configuration: xpkg.upbound.io/upbound/remote-configuration - version: '>=v0.0.0' - dependsOn: - - provider: xpkg.upbound.io/upbound/provider-kubernetes - version: '>=v0.0.0' -``` - -Like a Configuration, a _RemoteConfigurationRevision_ gets created when the -package gets installed on a control plane. Unlike Configurations, XRDs and -compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs -for claimable composite types get installed and Crossplane thereafter manages -their lifecycle. You can tell when a CRD gets installed by a -_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote: -true` label: - -```yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: things.networking.acme.com - labels: - internal.scheduling.upbound.io/remote: "true" -``` - -## Use an _Environment_ to route resources - -Environment is a namespace-scoped resource type that works with resources -deployed with RemoteConfigurations. It allows you to configure how to route -remote resources to other control planes by a set of user-defined dimensions. - -### Define a routing dimension - -To establish a routing dimensions between two control planes, you must do two -things: - -1. Annotate the service control plane with the name and value of a dimension. -2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane. 
- -The example below demonstrates the creation of a service control plane with a -`region` dimension: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - labels: - dimension.scheduling.upbound.io/region: "us-east-1" - name: prod-1 - namespace: default -spec: -``` - -Upbound's Spaces controller keeps an inventory of all declared dimensions and -listens for control planes to route to them. - -### Create an _Environment_ - -Next, create an _Environment_ on a separate control plane, referencing the -dimension from before. The example below demonstrates routing all remote -resource requests in the `default` namespace of the control plane based on a -single `region` dimension: - -```yaml -apiVersion: scheduling.upbound.io/v1alpha1 -kind: Environment -metadata: - name: default - namespace: default -spec: - dimensions: - region: us-east-1 -``` - -You can specify whichever dimensions as you want. The example below demonstrates -multiple dimensions: - -```yaml -apiVersion: scheduling.upbound.io/v1alpha1 -kind: Environment -metadata: - name: default - namespace: default -spec: - dimensions: - region: us-east-1 - env: prod - offering: databases -``` - -In order for the routing controller to match, _all_ dimensions must match for a -given service control plane. - -You can specify dimension overrides on a per-resource group basis. This lets you -configure default routing rules for a given _Environment_ and override routing -on a per-offering basis. 
- -```yaml -apiVersion: scheduling.upbound.io/v1alpha1 -kind: Environment -metadata: - name: default - namespace: default -spec: - dimensions: - region: us-east-1 - resourceGroups: - - name: database.platform.upbound.io # database - dimensions: - region: "us-east-1" - env: "prod" - offering: "databases" - - name: networking.platform.upbound.io # networks - dimensions: - region: "us-east-1" - env: "prod" - offering: "networks" -``` - -### Confirm the configured route - -After you create an _Environment_ on a control plane, the routes selected get -reported in the _Environment's_ `.status.resourceGroups`. The following example -demonstrates this: - -```yaml -apiVersion: scheduling.upbound.io/v1alpha1 -kind: Environment -metadata: - name: default -... -status: - resourceGroups: - - name: database.platform.upbound.io # database - proposed: - controlPlane: ctp-1 - group: default - space: upbound-gcp-us-central1 - dimensions: - region: "us-east-1" - env: "prod" - offering: "databases" -``` - -If you don't see a response in the `.status.resourceGroups`, this indicates a -match wasn't found or an error establishing routing occurred. - -:::tip -There's no limit to the number of control planes you can route to. You can also -stack routing and form your own topology of control planes, with multiple layers -of routing. -::: - -### Limitations - - -Routing from one control plane to another is currently scoped to control planes -that exist in a single Space. You can't route resource requests to control -planes that exist on a cross-Space boundary. 
- - -[project-file]: /manuals/cli/howtos/project -[contact-us]: https://www.upbound.io/usage/support/contact -[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies -[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json -[configure-routing]: #use-an-environment-to-route-resources -[configuration]: https://docs.crossplane.io/latest/packages/providers diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/controllers.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/controllers.md deleted file mode 100644 index de2e5902d..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/controllers.md +++ /dev/null @@ -1,428 +0,0 @@ ---- -title: Controllers -weight: 250 -description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound. ---- - -:::important -This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us). -::: - - -Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to just managing resource types defined by Crossplane. Now you can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling. - -This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound. - -## Benefits - -The Controllers feature provides the following benefits: - -* Deploy control plane software from the Kubernetes ecosystem. -* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between package. -* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_. 
- - -## How it works - - -A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software includes: - -- Kubernetes policy engines -- CI/CD tooling -- Your own private custom controllers defined by your organization - - -You build a _Controller_ package by wrapping a helm chart along with its -requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to -an OCI registry, and from there you can apply it to a control plane like you -would any other Crossplane package. The control plane's package manager manages -the lifecycle of the software once applied. - - -## Prerequisites - -Enable the Controllers feature in the Space you plan to run your control plane in: - -- Cloud Spaces: Not available yet -- Connected Spaces: Space administrator must enable this feature -- Disconnected Spaces: Space administrator must enable this feature - -Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later. - - - -## Build a _Controller_ package - - - -_Controllers_ are a package type that get administered by your control plane's package manager. - -### Prepare the package - -To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available. - -Start by making a working directory to assemble the necessary parts: - -```ini -mkdir controller-package -cd controller-package -``` - -Inside the working directory, pull the Helm chart: - -```shell -export CHART_REPOSITORY= -export CHART_NAME= -export CHART_VERSION= - -helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION -``` - -Be sure to update the Helm chart repository, name, and version with your own. 
- -Move the Helm chart into its own folder: - -```ini -mkdir helm -mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz -``` - -Unpack the CRDs from the Helm chart into their own directory: - -```shell -export RELEASE_NAME= -export RELEASE_NAMESPACE= - -mkdir crds -helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \ - yq e 'select(.kind == "CustomResourceDefinition")' - | \ - yq -s '("crds/" + .metadata.name + ".yaml")' - -``` -Be sure to update the Helm release name, and namespace with your own. - -:::info -The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead. -::: - -Create a `crossplane.yaml` with your controller metadata: - -```yaml -cat < crossplane.yaml -apiVersion: meta.pkg.upbound.io/v1alpha1 -kind: Controller -metadata: - annotations: - friendly-name.meta.crossplane.io: Controller - meta.crossplane.io/description: | - A brief description of what the controller does. - meta.crossplane.io/license: Apache-2.0 - meta.crossplane.io/maintainer: - meta.crossplane.io/readme: | - An explanation of your controller. - meta.crossplane.io/source: - name: -spec: - packagingType: Helm - helm: - releaseName: - releaseNamespace: - # Value overrides for the helm release can be provided below. - # values: - # foo: bar -EOF -``` - - -The controller's file structure should look like this: - -```ini -. -├── crds -│ ├── your-crd.yaml -│ ├── second-crd.yaml -│ └── another-crd.yaml -├── crossplane.yaml -└── helm - └── chart.tgz -``` - -### Package and push the _Controller_ - -At the root of your controller's working directory, build the contents into an xpkg: - -```ini -up xpkg build -``` - -This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`. 
- -Push the package to your desired OCI registry: - -```shell -export UPBOUND_ACCOUNT= -export CONTROLLER_NAME= -export CONTROLLER_VERSION= -export XPKG_FILENAME= - -up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME -``` - - - -## Deploy a _Controller_ package - - - -:::important -_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later. -::: - -Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly: - -```shell -export CONTROLLER_NAME= -export CONTROLLER_VERSION= - -cat < crossplane.yaml -apiVersion: meta.pkg.upbound.io/v1alpha1 -kind: Controller -metadata: - annotations: - friendly-name.meta.crossplane.io: Controller ArgoCD - meta.crossplane.io/description: | - The ArgoCD Controller enables continuous delivery and declarative configuration - management for Kubernetes applications using GitOps principles. - meta.crossplane.io/license: Apache-2.0 - meta.crossplane.io/maintainer: Upbound Maintainers - meta.crossplane.io/readme: | - ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that - follows the GitOps methodology to manage infrastructure and application - configurations. - meta.crossplane.io/source: https://github.com/argoproj/argo-cd - name: argocd -spec: - packagingType: Helm - helm: - releaseName: argo-cd - releaseNamespace: argo-system - # values: - # foo: bar -EOF -``` - -The controller's file structure should look like this: - -```ini -. 
-├── crds -│ ├── applications.argoproj.io.yaml -│ ├── applicationsets.argoproj.io.yaml -│ └── appprojects.argoproj.io.yaml -├── crossplane.yaml -└── helm - └── chart.tgz -``` - -### Package and push controller-argocd - -At the root of your controller's working directory, build the contents into an xpkg: - -```ini -up xpkg build -``` - -This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`. - -Push the package to your desired OCI registry: - -```shell -export UPBOUND_ACCOUNT= -export CONTROLLER_NAME=controller-argocd -export CONTROLLER_VERSION=v7.8.8 -export XPKG_FILENAME= - -up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME -``` - -### Deploy controller-argocd to a control plane - -Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly: - -```ini -cat < - -## Frequently asked questions - -
-Can I package any software or are there any prerequisites to be a Controller? - -We define a *Controller* as a software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have some checks to enforce this at packaging time. - -
- -
-How can I package my software as a Controller? - -Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*. - -If you don't have a Helm chart, you can't deploy the software. We only support Helm charts as the underlying package format for *Controllers*. We may extend this to support other packaging formats like Kustomize in the future. - -
- -
-Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller? - -This is not recommended. For packaging Crossplane XRDs/ and Compositions, we recommend using the `Configuration` package format. A helm chart only with Crossplane XRDs/Compositions does not qualify as a *Controller*. - -
- -
-How can I override the Helm values when deploying a Controller? - -Overriding the Helm values is possible at two levels: -- During packaging time, in the package manifest file. -- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane `DeploymentRuntimeConfig`). - -Create a `ControllerRuntimeConfig` with your Helm value overrides under -`spec.helm.values`, then reference it from the Controller via -`spec.runtimeConfigRef.name`. If you omit `runtimeConfigRef`, the Controller uses -the config named `default`. - -Example of overriding replica count and image for a Controller at runtime: - -```yaml -apiVersion: pkg.upbound.io/v1alpha1 -kind: ControllerRuntimeConfig -metadata: - name: example-controllerruntimeconfig -spec: - helm: - values: - replicaCount: 2 - image: - tag: "v1.2.3" - # Add any other Helm chart values your controller chart supports ---- -apiVersion: pkg.upbound.io/v1alpha1 -kind: Controller -metadata: - name: my-controller -spec: - package: xpkg.upbound.io/your-org/your-controller:v1.0.0 - runtimeConfigRef: - name: example-controllerruntimeconfig -``` - -
- -
-How can I configure the helm release name and namespace for the controller? - -Right now, it is not possible to configure this at runtime. The package author configures release name and namespace during packaging, so it is hardcoded inside the package. Unlike a regular application that is deployed by a Helm chart, *Controllers* can only be deployed once in a given control plane, so, we hope it should be ok to rely on predefined release names and namespaces. We may consider exposing these in `ControllerRuntimeConfig` later, but, we would like to keep it opinionated unless there are strong reasons to do so. - -
- -
-Can I deploy more than one instance of a Controller package? - -No, this is not possible. Remember, a *Controller* package introduces CRDs which are cluster-scoped objects. Just like one cannot deploy more than one instance of the same Crossplane Provider package today, it is not possible to deploy more than one instance of a *Controller*. - -
- -
-Do I need a specific Crossplane version to run Controllers? - -Yes, you need to use Crossplane v1.19.0 or later to use *Controllers*. This is because of the changes in the Crossplane codebase to support third-party package formats in dependencies. - -Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel. - -
- -
-Can I deploy Controllers outside of an Upbound control plane? With UXP? - -No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound. - -
- - -[cli]: /manuals/uxp/overview diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/ctp-audit-logs.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/ctp-audit-logs.md deleted file mode 100644 index 187f6785b..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/ctp-audit-logs.md +++ /dev/null @@ -1,544 +0,0 @@ ---- -title: Control plane audit logging ---- - -This guide explains how to enable and configure audit logging for control planes -in Self-Hosted Upbound Spaces. - -Starting in Spaces `v1.14.0`, each control plane contains an API server that -supports audit log collection. You can use audit logging to track creation, -updates, and deletions of Crossplane resources. Control plane audit logs -use observability features to collect audit logs with `SharedTelemetryConfig` and -send logs to an OpenTelemetry (`OTEL`) collector. - - -## Prerequisites - -Before you begin, make sure you have: - -* Spaces `v1.14.0` or greater -* Admin access to your Spaces host cluster -* `kubectl` configured to access the host cluster -* `helm` installed -* `yq` installed -* `up` CLI installed and logged in to your organization - -## Enable observability - - -Observability graduated to General Available in `v1.14.0` but is disabled by -default. 
- - - - - -### Before `v1.14` -To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0` -or later and update your installation setting to the new flag: - -```diff -helm upgrade spaces upbound/spaces -n upbound-system \ -- --set "features.alpha.observability.enabled=true" -+ --set "observability.enabled=true" -``` - - - -### After `v1.14` - -To enable the GA Observability feature for `v1.14.0` and later, pass the feature -flag: - -```sh -helm upgrade spaces upbound/spaces -n upbound-system \ - --set "observability.enabled=true" - -``` - - - - -To confirm Observability is enabled, run the `helm get values` command: - - -```shell -helm get values --namespace upbound-system spaces | yq .observability -``` - -Your output should return: - -```shell-noCopy - enabled: true -``` - -## Install an observability backend - -:::note -If you already have an observability backend in your environment, skip to the -next section. -::: - - -For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log -generation. For production environments, configure a dedicated observability -backend like Datadog, Splunk, or an enterprise-grade Grafana stack. - - - -First, make sure your `kubectl` context points to your Spaces host cluster: - -```shell -kubectl config current-context -``` - -The output should return your cluster name. - -Next, install `docker-otel-lgtm` as a deployment using port-forwarding to -connect to Grafana. 
Create a manifest file and paste the -following configuration: - -```yaml title="otel-lgtm.yaml" -apiVersion: v1 -kind: Namespace -metadata: - name: observability ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: otel-lgtm - name: otel-lgtm - namespace: observability -spec: - ports: - - name: grpc - port: 4317 - protocol: TCP - targetPort: 4317 - - name: http - port: 4318 - protocol: TCP - targetPort: 4318 - - name: grafana - port: 3000 - protocol: TCP - targetPort: 3000 - selector: - app: otel-lgtm ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: otel-lgtm - labels: - app: otel-lgtm - namespace: observability -spec: - replicas: 1 - selector: - matchLabels: - app: otel-lgtm - template: - metadata: - labels: - app: otel-lgtm - spec: - containers: - - name: otel-lgtm - image: grafana/otel-lgtm - ports: - - containerPort: 4317 - - containerPort: 4318 - - containerPort: 3000 -``` - -Next, apply the manifest: - -```shell -kubectl apply --filename otel-lgtm.yaml -``` - -Your output should return the resources: - -```shell -namespace/observability created - service/otel-lgtm created - deployment.apps/otel-lgtm created -``` - -To verify your resources deployed, use `kubectl get` to display resources with -an `ACTIVE` or `READY` status. - -Next, forward the Grafana port: - -```shell -kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000 -``` - -Now you can access the Grafana UI at http://localhost:3000. - - -## Create an audit-enabled control plane - -To enable audit logging for a control plane, you need to label it so the -`SharedTelemetryConfig` can identify and apply audit settings. This section -creates a new control plane with the `audit-enabled: "true"` label. The -`audit-enabled: "true"` label marks this control plane for audit logging. The -`SharedTelemetryConfig` (created in the next section) finds control planes with -this label and enables audit logging on them. 
- -Create a new manifest file and paste the configuration below: - -
-```yaml title="ctp-audit.yaml" -apiVersion: v1 -kind: Namespace -metadata: - name: audit-test ---- -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - labels: - audit-enabled: "true" - name: ctp1 - namespace: audit-test -spec: - writeConnectionSecretToRef: - name: kubeconfig-ctp1 - namespace: audit-test -``` -
- -The `metadata.labels` section contains the `audit-enabled` setting. - -Apply the manifest: - -```shell -kubectl apply --filename ctp-audit.yaml -``` - -Confirm your control plane reaches the `READY` status: - -```shell -kubectl get --filename ctp-audit.yaml -``` - -## Create a `SharedTelemetryConfig` - -The `SharedTelemetryConfig` applies to all control plane objects in a namespace -and enables audit logging and routes logs to your `OTEL` endpoint. - -Create a `SharedTelemetryConfig` manifest file and paste the configuration -below: - -
-```yaml title="sharedtelemetryconfig.yaml" -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: apiserver-audit - namespace: audit-test -spec: - apiServer: - audit: - enabled: true - exporters: - otlphttp: - endpoint: http://otel-lgtm.observability:4318 - exportPipeline: - logs: [otlphttp] - controlPlaneSelector: - labelSelectors: - - matchLabels: - audit-enabled: "true" -``` -
- -This configuration: - -* Sets `apiServer.audit.enabled` to `true` -* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service -* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true` - -:::note -You can configure the `SharedTelemetryConfig` to select control planes in -several ways. For more information on control plane selection, see the [control -plane selection][ctp-selection] documentation. -::: - -Apply the `SharedTelemetryConfig`: - -```shell -kubectl apply --filename sharedtelemetryconfig.yaml -``` - -Confirm the configuration selected the control plane: - -```shell -kubectl get --filename sharedtelemetryconfig.yaml -``` - -The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`. - -For more detailed status information, use `kubectl get`: - -```shell -kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status -``` - -## Generate and monitor audit events - -You enabled telemetry on your new control plane and can now generate events to -test the audit logging. This guide uses the `nop-provider` to simulate resource -operations. - -Switch your `up` context to the new control plane: - -```shell -up ctx /// -``` - -Create a new Provider manifest: - -```yaml title="provider-nop.yaml" -apiVersion: pkg.crossplane.io/v1 - kind: Provider - metadata: - name: crossplane-contrib-provider-nop - spec: - package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0 -``` - -Apply the provider manifest: - -```shell -kubectl apply --filename provider-nop.yaml -``` - -Verify the provider installed and returns `HEALTHY` status as `TRUE`. - -Apply an example resource to kick off event generation: - - -```shell -kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml -``` - -In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the -Grafana menu. 
- - -Filter for `controlplane-audit` log messages. - -Create a query to find `create` events on `nopresources` by filtering: - -* The `verb` field for `create` events -* The `objectRef_resource` field to match the Kind `nopresources` - -Review the audit log results. The log stream displays: - -* The client applying the create operation -* The resource kind -* Client details -* The response code - -Expand the example below for an audit log entry: - -
- Audit log entry - -```json -{ - "level": "Metadata", - "auditID": "51bbe609-14ad-4874-be78-1289c10d506a", - "stage": "ResponseComplete", - "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict", - "verb": "create", - "user": { - "username": "kubernetes-admin", - "groups": ["system:masters", "system:authenticated"] - }, - "impersonatedUser": { - "username": "upbound:spaces:host:masterclient", - "groups": [ - "system:authenticated", - "upbound:controlplane:admin", - "upbound:spaces:host:system:masters" - ] - }, - "sourceIPs": ["10.244.0.135", "127.0.0.1"], - "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0", - "objectRef": { - "resource": "nopresources", - "name": "example", - "apiGroup": "nop.crossplane.io", - "apiVersion": "v1alpha1" - }, - "responseStatus": { "metadata": {}, "code": 201 }, - "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z", - "stageTimestamp": "2025-09-19T23:03:24.557583Z", - "annotations": { - "authorization.k8s.io/decision": "allow", - "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\"" - } - } -``` -
- -## Customize the audit policy - -Spaces `v1.14.0` includes a default audit policy. You can customize this policy -by creating a configuration file and passing the values to -`observability.collectors.apiServer.auditPolicy` in the helm values file. - -An example custom audit policy: - -```yaml -observability: - controlPlanes: - apiServer: - auditPolicy: | - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - # ============================================================================ - # RULE 1: Exclude health check and version endpoints - # ============================================================================ - - level: None - nonResourceURLs: - - '/healthz*' - - '/readyz*' - - /version - # ============================================================================ - # RULE 2: ConfigMaps - Write operations only - # ============================================================================ - - level: Metadata - resources: - - group: "" - resources: - - configmaps - verbs: - - create - - update - - patch - - delete - omitStages: - - RequestReceived - - ResponseStarted - # ============================================================================ - # RULE 3: Secrets - ALL operations - # ============================================================================ - - level: Metadata - resources: - - group: "" - resources: - - secrets - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - omitStages: - - RequestReceived - - ResponseStarted - # ============================================================================ - # RULE 4: Global exclusion of read-only operations - # ============================================================================ - - level: None - verbs: - - get - - list - - watch - # ========================================================================== - # RULE 5: Exclude standard Kubernetes resources from write operation logging - # 
========================================================================== - - level: None - resources: - - group: "" - - group: "apps" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "storage.k8s.io" - - group: "batch" - - group: "autoscaling" - - group: "metrics.k8s.io" - - group: "node.k8s.io" - - group: "scheduling.k8s.io" - - group: "coordination.k8s.io" - - group: "discovery.k8s.io" - - group: "events.k8s.io" - - group: "flowcontrol.apiserver.k8s.io" - - group: "internal.apiserver.k8s.io" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "admissionregistration.k8s.io" - verbs: - - create - - update - - patch - - delete - # ============================================================================ - # RULE 6: Catch-all for ALL custom resources and any missed resources - # ============================================================================ - - level: Metadata - verbs: - - create - - update - - patch - - delete - omitStages: - - RequestReceived - - ResponseStarted - # ============================================================================ - # RULE 7: Final catch-all - exclude everything else - # ============================================================================ - - level: None - omitStages: - - RequestReceived - - ResponseStarted -``` -You can apply this policy during Spaces installation or upgrade using the helm values file. - -Audit policies use rules evaluated in order from top to bottom where the first -matching rule applies. 
Control plane audit policies follow Kubernetes conventions and use the -following logging levels: - -* **None** - Don't log events matching this rule -* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies -* **Request** - Log metadata and request body but not response body -* **RequestResponse** - Log metadata, request body, and response body - -For more information, review the Kubernetes [Auditing] documentation. - -## Disable audit logging - -You can disable audit logging on a control plane by removing it from the -`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`. - -### Disable for specific control planes - -Remove the `audit-enabled` label from control planes that should stop sending audit logs: - -```bash -kubectl label controlplane --namespace audit-enabled- -``` - -The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops. - -### Disable for all control planes - -Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages: - -```bash -kubectl delete sharedtelemetryconfig --namespace -``` - -[ctp-selection]: /self-hosted-spaces/howtos/observability/#control-plane-selection -[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/ctp-connector.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/ctp-connector.md deleted file mode 100644 index 5fcf1684e..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/ctp-connector.md +++ /dev/null @@ -1,499 +0,0 @@ ---- -title: Control Plane Connector -weight: 80 -description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature -plan: "standard" ---- - - - -Control Plane Connector connects arbitrary Kubernetes application clusters outside the -Upbound Spaces to your control planes running in Upbound 
Spaces. -This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs -you define via CompositeResourceDefinitions (XRDs) in the control plane, are available in -your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector -enables the same experience as a locally installed Crossplane. - -![control plane connector operations flow](/img/ConnectorFlow.png) - -### Control Plane Connector operations - -Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer] -to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It -discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding -APIService resources on the app cluster. Those APIService resources refer to the -extension API server of Control Plane Connector. - -The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native -Kubernetes APIs. - -The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the -relevant requests to the connected control plane. - -Only the connected control plane stores and processes all claims and namespaced XRs created in the app -cluster, eliminating any storage use at the application cluster. The control plane -connector provisions a target namespace at the control plane for the app cluster and stores -all claims and namespaced XRs in this target namespace. - -For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a -resource by combining input parameters from claims, including: -- `metadata.name` -- `metadata.namespace` -- `your cluster name` - - -It employs SHA-256 hashing to generate a hash value and then extracts the first -16 characters of that hash. 
This ensures the resulting identifier remains within -the 64-character limit in Kubernetes. - - - -For instance, if a claim named `my-bucket` exists in the test namespace in -`cluster-dev`, the system calculates the SHA-256 hash from -`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16 -characters. The control plane side then names the claim `claim-c603e518969b413e`. - -For namespaced XRs, the process is similar, only the prefix is different. -The name becomes `nxr-c603e518969b413e`. - - -### Installation - - - - - -Log in with the up CLI: - -```bash -up login -``` - -Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install `. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` and provide the following below. Select the tab according to which environment your control plane is running in. - - - - - - -```yaml -upbound: - # This is your org account in Upbound e.g. the name displayed after executing `up org list` - account: - # This is a personal access token generated in the Upbound Console - token: - -spaces: - # If your control plane is running in Upbound's GCP Cloud Space, else use upbound-aws-us-east-1.spaces.upbound.io - host: "upbound-gcp-us-west-1.spaces.upbound.io" - insecureSkipTLSVerify: true - controlPlane: - # The name of the control plane you want the Connector to attach to - name: - # The control plane group the control plane resides in - group: - # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. - claimNamespace: -``` - - - - - -1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane. -```ini -up login -up ctx /upbound-gcp-us-central-1/default/your-control-plane -up ctx . 
-f - > context.yaml -``` - -2. Write it to a secret in the cluster where you plan to -install the Control Plane Connector to. -```ini -kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml -``` - -3. Reference this secret in the -`spaces.controlPlane.kubeconfigSecret` field below. - -```yaml -spaces: - controlPlane: - # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. - claimNamespace: - kubeconfigSecret: - name: my-controlplane-kubeconfig - key: kubeconfig -``` - - - - - - -Provide the values file above when you run the CLI command: - - -```bash {copy-lines="3"} -up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml -``` - -The Claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster. -You can verify this with `kubectl api-resources`. - -```bash -kubectl api-resources -``` - -### Uninstall - -Disconnect an app cluster that you prior installed the Control Plane Connector on by -running the following: - -```bash -up ctp connector uninstall -``` - -This command uninstalls the helm chart for the Control Plane Connector from an app -cluster. It moves any claims in the app cluster into the control plane -at the specified namespace. - -:::tip -Ensure your kubeconfig's current context points to the app cluster where you uninstall Control Plane Connector. -::: - - - - -It's recommended you create a values file called `connector-values.yaml` and -provide the following below. Select the tab according to which environment your -control plane is running in. - - - - - - -```yaml -upbound: - # This is your org account in Upbound e.g. 
the name displayed after executing `up org list` - account: - # This is a personal access token generated in the Upbound Console - token: - -spaces: - # Upbound GCP US-West-1 upbound-gcp-us-west-1.spaces.upbound.io - # Upbound AWS US-East-1 upbound-aws-us-east-1.spaces.upbound.io - # Upbound GCP US-Central-1 upbound-gcp-us-central-1.spaces.upbound.io - host: "" - insecureSkipTLSVerify: true - controlPlane: - # The name of the control plane you want the Connector to attach to - name: - # The control plane group the control plane resides in - group: - # The namespace within the control plane to sync claims from the app cluster to. - # NOTE: This must be created before you install the connector. - claimNamespace: -``` - - - - -Create a [kubeconfig][kubeconfig-1] for the -control plane. Write it to a secret in the cluster where you plan to -install the Control Plane Connector to. Reference this secret in the -`spaces.controlPlane.kubeconfigSecret` field below. - -```yaml -spaces: - controlPlane: - # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. - claimNamespace: - kubeconfigSecret: - name: my-controlplane-kubeconfig - key: kubeconfig -``` - - - - - - -Provide the values file above when you `helm install` the Control Plane Connector: - - -```bash -helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml -``` -:::tip -Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions]. -::: - -### Uninstall - -You can uninstall Control Plane Connector with Helm by running the following: - -```bash -helm uninstall mcp-connector -``` - - - - - -### Example usage - -This example creates a control plane using [Configuration -EKS][configuration-eks]. `KubernetesCluster` is -available as a claim API in your control plane. 
The following is [an -example][an-example] -object you can create in your control plane. - -```yaml -apiVersion: k8s.starter.org/v1alpha1 -kind: KubernetesCluster -metadata: - name: my-cluster - namespace: default -spec: - id: my-cluster - parameters: - nodes: - count: 3 - size: small - services: - operators: - prometheus: - version: "34.5.1" - writeConnectionSecretToRef: - name: my-cluster-kubeconfig -``` - -After connecting your Kubernetes app cluster to the control plane, you -can create the `KubernetesCluster` object in your app cluster. Although your -local cluster has an Object, the actual resources is in your managed control -plane inside Upbound. - -```bash {copy-lines="3"} -# Applying the claim YAML above. -# kubectl is set up to talk with your Kubernetes cluster. -kubectl apply -f claim.yaml - - -kubectl get claim -A -NAME SYNCED READY CONNECTION-SECRET AGE -my-cluster True True my-cluster-kubeconfig 2m -``` - -Once Kubernetes creates the object, view the console to see your object. - -![Claim by connector in console](/img/ClaimInConsole.png) - -You can interact with the object through your cluster just as if it -lives in your cluster. - -### Migration to control planes - -This guide covers migrating a Crossplane installation to Upbound-managed control planes. It uses the Control Plane Connector to manage claims on an application cluster. - -![migration flow application cluster to control plane](/img/ConnectorMigration.png) - -#### Export all resources - -Before proceeding, ensure that you have set the correct kubecontext for your application -cluster. - -```bash -up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes -``` - -This command performs the following: -- Pauses all claim, composite, and managed resources before export. -- Scans the control plane for resource types. -- Exports Crossplane and native resources. -- Archives the exported state into `my-export.tar.gz`. 
- -Example output: -```bash -Exporting control plane state... - ✓ Pausing all claim resources before export... 1 resources paused! ⏸️ - ✓ Pausing all composite resources before export... 7 resources paused! ⏸️ - ✓ Pausing all managed resources before export... 34 resources paused! ⏸️ - ✓ Scanning control plane for types to export... 231 types found! 👀 - ✓ Exporting 231 Crossplane resources...125 resources exported! 📤 - ✓ Exporting 3 native resources...19 resources exported! 📤 ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦 - -Successfully exported control plane state! -``` - -#### Import all resources - -The system restores the target control plane with the exported -resources, which serves as the destination for the Control Plane Connector. - - -Log into Upbound and select the correct context: - -```bash -up login -up ctx -up ctp create ctp-a -``` - -Output: -```bash -ctp-a created -``` - -Verify that the Crossplane version on both the application cluster and the new managed -control plane matches the core Crossplane version. - -Use the following command to import the resources: -```bash -up controlplane migration import -i my-export.tar.gz \ - --unpause-after-import \ - --mcp-connector-cluster-id=my-appcluster \ - --mcp-connector-claim-namespace=my-appcluster -``` - -This command: -- Note: `--mcp-connector-cluster-id` needs to be unique per application cluster -- Note: `--mcp-connector-claim-namespace` is the namespace the system creates - during the import -- Restores base resources -- Waits for XRDs and packages to establish -- Imports Claims, XRs resources -- Finalizes the import and resumes managed resources - -Example output: -```bash -Importing control plane state... - ✓ Reading state from the archive... Done! 👀 - ✓ Importing base resources... 56 resources imported!📥 - ✓ Waiting for XRDs... Established! ⏳ - ✓ Waiting for Packages... Installed and Healthy! ⏳ - ✓ Importing remaining resources... 88 resources imported! 
📥 - ✓ Finalizing import... Done! 🎉 - ✓ Unpausing managed resources ... Done! ▶️ - -fully imported control plane state! -``` - -Verify Imported Claims - - -The Control Plane Connector renames all claims and adds additional labels to them. - -```bash -kubectl get claim -A -``` - -Example output: -```bash -NAMESPACE NAME SYNCED READY CONNECTION-SECRET AGE -my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 True True platform-ref-aws-kubeconfig 3m17s -``` - -Inspect the labels: -```bash -kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels -``` - -Example output: -```bash -mcp-connector.upbound.io/app-cluster: my-appcluster -mcp-connector.upbound.io/app-namespace: default -mcp-connector.upbound.io/app-resource-name: example -``` - -#### Cleanup the app cluster - -Remove all Crossplane-related resources from the application cluster, including: - -- Managed Resources -- Claims -- Compositions -- XRDs -- Packages (Functions, Configurations, Providers) -- Crossplane and all associated CRDs - - -#### Install Control Plane Connector - - -Follow the preceding installation guide and configure the `connector-values.yaml`: - -```yaml -# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control Plane -clusterID: my-appcluster -upbound: - account: - token: - -spaces: - host: "" - insecureSkipTLSVerify: true - controlPlane: - name: - group: - # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane - claimNamespace: -``` -Once the Control Plane Connector installs, verify that resources exist in the application -cluster: - -```bash -kubectl api-resources | grep platform -``` - -Example output: -```bash -awslbcontrollers aws.platform.upbound.io/v1alpha1 true AWSLBController -podidentities aws.platform.upbound.io/v1alpha1 true PodIdentity -sqlinstances aws.platform.upbound.io/v1alpha1 true SQLInstance -clusters 
aws.platformref.upbound.io/v1alpha1 true Cluster -osss observe.platform.upbound.io/v1alpha1 true Oss -apps platform.upbound.io/v1alpha1 true App -``` - -Restore claims from the control plane to the application cluster: - -```bash -kubectl get claim -A -``` - -Example output: -```bash -NAMESPACE NAME SYNCED READY CONNECTION-SECRET AGE -default cluster.aws.platformref.upbound.io/example True True platform-ref-aws-kubeconfig 127m -``` - -With this guide, you migrated your Crossplane installation to -Upbound-managed control planes. This ensures seamless integration with your -application cluster using the Control Plane Connector. - -### Connect multiple app clusters to a control plane - -Claims are stored in a unique namespace in the Upbound control plane. -Every cluster creates a new control plane namespace. - -![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png) - -There's no limit on the number of clusters connected to a single control plane. -Control plane operators can see all their infrastructure in a central control -plane. - -Without using control planes and Control Plane Connector, users have to install -Crossplane and providers for each cluster. Each cluster requires configuration for -providers with necessary credentials. With a single control plane where multiple -clusters connect through Upbound tokens, you don't need to give out any cloud -credentials to the clusters. 
- - -[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group -[kubeconfig-1]:/self-hosted-spaces/concepts/control-planes/#connect-directly-to-your-control-plane -[these-instructions]:/manuals/console/upbound-console/#create-a-personal-access-token -[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/ -[configuration-eks]: https://github.com/upbound/configuration-eks -[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/debugging-a-ctp.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/debugging-a-ctp.md deleted file mode 100644 index b72012832..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/debugging-a-ctp.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Debugging issues on a control plane -sidebar_position: 70 -description: A guide for how to debug resources on a control plane running in Upbound. ---- - -This guide provides troubleshooting guidance for how to identify and fix issues on a control plane. - - -## Start from Upbound Console - - -The Upbound [Console][console] has a built-in control plane explorer experience -that surfaces status and events for the resources on your control plane. The -explorer is claim-based. Resources in this view exist only if they exist in the -reference chain originating from a claim. This view is a helpful starting point -if you are attempting to debug an issue originating from a claim. - -:::tip -If you directly create Crossplane Managed Resources (`MR`s) or Composite -Resources (`XR`s), they don't render in the explorer. -::: - -### Example - -The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy. 
- -#### Check the health status of claims - -From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon. - -![Use control plane explorer view to see status of claims](/img/debug-overview.png) - -Select `More details` on the unhealthy claim card and Upbound shows details for the claim. - -![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png) - -Looking at the three events for this claim: - -- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`). - -- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready. - -- **ConfigureCompositeResource**: the error saying, `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error. - -Next, look at the `status` field of the rendered YAML for the resource. - -![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png) - -The status reports a similar message as the event stream: this claim is waiting for a Composite Resource to be ready. Based on this, investigate the Composite Resource referenced by this claim next. - -#### Check the health status of the Composite Resource - - -The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim. - - -The previous claim expands into this screenshot: - -![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png) - -This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. 
Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy. - -#### Inspecting Managed Resources - -Selecting `more details` to inspect one of the unhealthy Managed Resources shows the following: - -![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png) - -This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following: - -![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png) - -The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is. - -![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png) - -The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is everything is fine -- all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned. - -```yaml -... 
-status: - atProvider: - id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008 - conditions: - - lastTransitionTime: '2023-04-03T13:56:35Z' - reason: Available - status: 'True' - type: Ready - - lastTransitionTime: '2023-04-03T13:54:02Z' - reason: ReconcileSuccess - status: 'True' - type: Synced - - lastTransitionTime: '2023-04-03T13:54:53Z' - reason: Success - status: 'True' - type: LastAsyncOperation - - lastTransitionTime: '2023-04-03T13:54:53Z' - reason: Finished - status: 'True' - type: AsyncOperation -``` - -### Control plane explorer limitations - -The control plane explorer view is currently designed around claims (`XC`s). The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI. - -Some examples of Crossplane resources that require the `up` CLI - -- Managed Resources that aren't associated with a claim -- Composite Resources that aren't associated with a claim -- The status of _deleting_ resources -- ProviderConfigs -- Provider events - -## Use direct CLI access - -If your preference is to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane. - - -[console]: /manuals/console/upbound-console -[up-ctx]: /reference/cli-reference diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/declarative-ctps.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/declarative-ctps.md deleted file mode 100644 index 2674ca52b..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/declarative-ctps.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Declaratively create control planes -sidebar_position: 99 -description: A tutorial to configure a Space with Argo to declaratively create and - manage control planes ---- - -In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. 
This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure. - - -## Prerequisites - -To complete this tutorial, you need the following: - -- Have already deployed an Upbound Space. -- Have already deployed an instance of Argo CD on a Kubernetes cluster. - -## Connect your Space to Argo CD - -Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo. - -```ini -export SPACES_CLUSTER_SERVER="https://url" -export SPACES_CLUSTER_NAME="cluster" -``` - -Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster. - -:::important -Run the following commands against your **Argo** cluster, not your Space cluster. -::: - -Run the following command in a terminal: - -```yaml -cat < - - -:::important - -This feature is in preview. The Query API is available in the Cloud Space offering in `v1.6` and enabled by default. - -Since Spaces `v1.8.0`, you must enable Query API to connect a Space. - -Use this guide to enable Query API in your Space. -::: - -Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space. - -Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance. 
- -## Managed setup - -:::tip -If you don't have specific requirements for your setup, Upbound recommends following this approach. -::: - -To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces. - -However, you need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually. - -For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number -of PostgreSQL instances, pooling instances, storage size, and more. - -If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup]. - -### Using the up CLI - -Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed]. - -To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces: - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "features.alpha.apollo.enabled=true" \ - --set "apollo.apollo.storage.postgres.create=true" -``` - -`up space init` and `up space upgrade` install CloudNativePG automatically, if needed. - -### Helm chart - -If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example: - -```shell -kubectl apply --server-side -f \ - https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml -kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s -``` - -Next, install the Spaces Helm chart with the necessary values, for example: - -```shell -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - ... 
- --set "features.alpha.apollo.enabled=true" \ - --set "apollo.apollo.storage.postgres.create=true" \ - --wait -``` - -## Self-hosted PostgreSQL configuration - - -If your workflow requires more customization, you can provide your own -PostgreSQL instance and configure credentials manually. - -Using your own PostgreSQL instance requires careful architecture consideration. -Review the architecture and requirements guidelines. - -### Architecture - -The Query API architecture uses three components, other than a PostgreSQL database: -* **Apollo Syncers**: Watching `ETCD` for changes and syncing them to PostgreSQL. One, or more, per control plane. -* **Apollo Server**: Serving the Query API out of the data in PostgreSQL. One, or more, per Space. - -The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers. -```mermaid -graph LR - User[User] - - subgraph Cluster["Cluster (Spaces)"] - direction TB - Apollo[apollo] - - subgraph ControlPlanes["Control Planes"] - APIServer[API Server] - Syncer[apollo-syncer] - end - end - - PostgreSQL[(PostgreSQL)] - - User -->|requests| Apollo - - Apollo -->|connects| PostgreSQL - Apollo -->|creates schemas & users| PostgreSQL - - Syncer -->|watches| APIServer - Syncer -->|writes| PostgreSQL - - PostgreSQL -->|data| Apollo - - style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000 - style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000 - style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000 - style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000 -``` - - -Each component needs to connect to the PostgreSQL database. - -In the event of database issues, you can provide a new database and the syncers -automatically repopulate the data. - -### Requirements - -* A PostgreSQL 16 instance or cluster. -* A database, for example named `upbound`. 
-* **Optional**: A dedicated user for the Apollo Syncers, otherwise the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions, for example named `syncer`. -* A dedicated **superuser or admin account** for the Apollo Server. -* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL. -* **Optional**: A read replica for the Apollo Syncers to connect to, to reduce load on the primary database, this might cause a slight delay in the data being available through the Query API. - -Below you can find examples of setups to get you started, you can mix and match the examples to suit your needs. - -### In-cluster setup - - -:::tip - -If you don't have strong opinions on your setup, but still want full control on -the resources created for some unsupported customizations, Upbound recommends -the in-cluster setup. - -::: - - -For more customization than the managed setup, you can use CloudNativePG for -PostgreSQL in the same cluster. - -For in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example: - -```shell -kubectl apply --server-side -f \ - https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml -kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s -``` - -Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example: - -```shell -kubectl create ns upbound-system - -kubectl apply -f - < - -### External setup - - -:::tip -Use this method if you decide the `apollo` manager can manage credentials , but your -PostgreSQL instance is outside the cluster. -::: - -When using this setup, you must manually create the required Secrets in the -`upbound-system` namespace. 
The `apollo` user must have permissions to create -schemas and users. - -```shell - -kubectl create ns upbound-system - -# A Secret containing the necessary credentials to connect to the PostgreSQL instance -kubectl create secret generic spaces-apollo-pg-app -n upbound-system \ - --from-literal=password=supersecret - -# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance -kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \ - --from-file=ca.crt=/path/to/ca.crt -``` - -Next, install Spaces with the necessary settings: - -```shell -export PG_URL=your-postgres-host:5432 -export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above - -helm upgrade --install ... \ - --set "features.alpha.apollo.enabled=true" \ - --set "apollo.apollo.storage.postgres.create=false" \ - --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \ - --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \ - --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \ - --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \ - --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" -``` - -### External setup with all custom credentials - -For custom credentials with Apollo Syncers or Server, create a new secret in the -`upbound-system` namespace: - -```shell -export APOLLO_SYNCER_USER=syncer -export APOLLO_SERVER_USER=apollo - -kubectl create ns upbound-system - -# A Secret containing the necessary credentials to connect to the PostgreSQL instance -kubectl create secret generic spaces-apollo-pg-app -n upbound-system \ - --from-literal=password=supersecret - -# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance -kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \ - --from-file=ca.crt=/path/to/ca.crt - -# A Secret containing the necessary 
credentials for the Apollo Syncers to connect to the PostgreSQL instance. -# These will be used by all Syncers in the Space. -kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \ - --from-literal=username=$APOLLO_SYNCER_USER \ - --from-literal=password=supersecret - -# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance. -kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \ - --from-literal=username=$APOLLO_SERVER_USER \ - --from-literal=password=supersecret -``` - -Next, install Spaces with the necessary settings: - -```shell -export PG_URL=your-postgres-host:5432 -export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above - -helm ... \ - --set "features.alpha.apollo.enabled=true" \ - --set "apollo.apollo.storage.postgres.create=false" \ - --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \ - --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \ - --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \ - --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \ - --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \ - - # For the syncers - --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \ - --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \ - --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \ - - # For the server - --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \ - --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \ - --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \ - --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL" -``` - - 
-## Using the Query API - - -See the [Query API documentation][query-api-documentation] for more information on how to use the Query API. - - - - -[postgresql-setup]: #self-hosted-postgresql-configuration -[up-cli-installed]: /manuals/cli/overview -[query-api-documentation]: /self-hosted-spaces/howtos/query-api - -[helm-chart-reference]: /reference/spaces-helm-reference/ -[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ -[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/ -[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/ -[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template -[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/ -[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates -[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/ -[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha -[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd -[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates -[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/deployment-reqs.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/deployment-reqs.md deleted file mode 100644 index 71d0ca08c..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/deployment-reqs.md +++ /dev/null @@ -1,243 +0,0 @@ ---- -title: Deployment Requirements -sidebar_position: 2 -description: Deploy a Space in your production environment. ---- - -You need a Kubernetes cluster as the hosting environment to run Spaces. 
Upbound -validates the Spaces software runs on [AWS EKS][aws-eks], [Google Cloud -GKE][google-cloud-gke], and [Microsoft AKS][microsoft-aks]. Upbound recommends -dedicating the Kubernetes cluster for the express purpose of running Spaces as -its sole workload. - -## Kubernetes version compatibility - -Upbound tests each Spaces release against specific Kubernetes versions: - -| Spaces Version | Tested Kubernetes Versions | -|----------------|----------------------------| -| 1.16 | 1.33, 1.34, 1.35 | -| 1.15 | 1.32, 1.33, 1.34, 1.35 | -| 1.14 | 1.31, 1.32, 1.33, 1.34 | -| 1.13 | 1.30, 1.31, 1.32, 1.33 | - -## Deployment requirements - -Spaces requires three things: - -1. A Kubernetes cluster. -2. You've configured the Kubernetes cluster with the required prerequisites. -3. You must have an [Upbound account][upbound-account]. Spaces is a feature only available for paying customers in the **Business Critical** tier of Upbound. - -This guide helps you think through all steps needed to deploy Spaces for production workloads. - -## Sizing a Space - -In a Space, the control planes you create get scheduled as pods across the -cluster's node pools. The hyper scale cloud providers each offer managed -Kubernetes services that can support hundreds of nodes in their node pools. That -means the number of control planes you can run in a single Space is on the order -hundreds--if not more. - -Rightsizing a Space for a production deployment depends on several factors: - -1. The number of control planes you plan to run in the Space. -2. The number of managed resources you plan each control plane to reconcile. -3. The Crossplane providers you plan to install in each control plane. - -### Memory considerations - -#### Control plane empty state memory usage - -An idle, empty control plane consumes about 640 MB of memory. This encompasses -the set of pods that constitute a control plane and which get deployed for each -control plane instance. 
- -#### Managed resource memory usage - -In Upbound's testing, memory usage isn't influenced a lot by the number of managed resources under management. Memory usage only goes up slightly by 100 MB when going from 100 to 1000 resource instances under management of a control plane. Hence, for simplicity, you don't need to account for an increase in memory usage on this axis of the control plane. - -#### Provider memory usage - -When you install a Crossplane provider on a control plane, memory gets consumed -according to the number of custom resources it defines. Upbound [Official Provider families][official-provider-families] provide higher fidelity control -to platform teams to install providers for only the resources they need, -reducing the bloat of needlessly installing unused custom resources. Still, you -must factor provider memory usage into your calculations to ensure you've -rightsized the memory available in your Spaces cluster. - - -:::important -Be careful not to conflate `managed resource` with `custom resource definition`. -The former is an "instance" of an external resource in Crossplane, while the -latter defines the API schema of that resource. -::: - -It's estimated that each custom resource definition consumes ~3 MB of memory. -The calculation is: - -```bash -number_of_managed_resources_defined_in_provider x 3 MB = memory_required -``` - -For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is: - -```bash -provider-aws-ec2: 98 x 3 MB = 294 MB -provider-aws-s3: 23 x 3 MB = 69 MB -provider-aws-iam 22 x 3 MB = 66 MB ---- -total memory: 429 MB -``` - -In this scenario, you should budget ~430 MB of memory for provider usage on this control plane. - -:::tip -Do this calculation for each provider you plan to install on your control plane. -Then do this calculation for each control plane you plan to run in your Space. 
-::: - - -#### Total memory usage - -Add the memory usage from the previous sections. Given the preceding examples, -they result in a recommendation to budget ~1 GB memory for each control plane -you plan to run in the Space. - -:::important - -The 1 GB recommendation is an example. -You should input your own provider requirements to arrive at a final number for -your own deployment. - -::: - -### CPU considerations - -#### Managed resource CPU usage - -The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly according to the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _does_ vary from provider to provider. Using the Upbound Official Provider families as a baseline: - - -| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) | -| ---- | ---- | ---- | -| provider-family-aws | 10 | 2 to 3 | -| provider-family-gcp | 7 | 1.5 | -| provider-family-azure | 7 to 10 | 1.5 to 3 | - - -When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available. Interpreting this table: - -- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores. -- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core. - -Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations. 
- -Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs. You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5. - -#### Cloud API latency - -Oftentimes, you are using Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]). - -For Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families), these providers use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates. - -### Resource buffers - -The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for: - -- a set of control planes in a Space -- tuned to the number of providers you plan to use -- according to the number of managed resource instances you plan to have managed by your control planes - -Upbound recommends budgeting an extra buffer of 20% to your resource capacity calculations. The numbers shared in the preceding sections don't account for peaks or surges since they're based off average measurements. Upbound recommends budgeting this buffer to account for these things. - -## Deploying more than one Space - -You are welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Space to Kubernetes clusters. Spaces are by their nature constrained to a single Kubernetes Cluster, which are regional entities. 
If you want to offer control planes in multiple cloud environments or multiple public clouds entirely, these are justifications for deploying >1 Spaces. - -## Cert-manager - -A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to -provision certificates within the Space. This establishes a nice API boundary -between what your platform may need and the Certificate requirements of a -Space. - - -In the event you would like more control over the issuing Certificate Authority -for your deployment or the deployment of cert-manager itself, this guide is for -you. - - -### Deploying - -An Upbound Space deployment doesn't have any special requirements for the -cert-manager deployment itself. The only expectation is that cert-manager and -the corresponding Custom Resources exist in the cluster. - -You should be free to install cert-manager in the cluster in any way that makes -sense for your organization. You can find some [installation ideas] in the -cert-manager docs. - -### Issuers - -A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer` -is a `selfSigned` issuer that other certificates are minted from. You have a -couple of options available to you for changing the default deployment of the -Issuer: -1. Changing the issuer name. -2. Providing your own ClusterIssuer. - - -#### Changing the issuer name - -The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer` -Helm property. You can adjust this during installation by providing the -following parameter (assuming your new name is 'SpaceClusterIssuer'): -```shell ---set "certificates.space.clusterIssuer=SpaceClusterIssuer" -``` - - - -#### Providing your own ClusterIssuer - -To provide your own `ClusterIssuer`, you need to first setup your own -`ClusterIssuer` in the cluster. The cert-manager docs have a variety of options -for providing your own. See the [Issuer Configuration] docs for more details. 
- -Once you have your own `ClusterIssuer` set up in the cluster, you need to turn -off the deployment of the `ClusterIssuer` included in the Spaces deployment. -To do that, provide the following parameter during installation: -```shell ---set "certificates.provision=false" -``` - -###### Considerations -If your `ClusterIssuer` has a name that's different from the default name that -the Spaces installation expects ('spaces-selfsigned'), you need to also specify -your `ClusterIssuer` name during install using: -```shell ---set "certificates.space.clusterIssuer=" -``` - -## Exposing spaces externally - -To route requests from external clients (kubectl, ArgoCD, etc.) to control planes, you must expose the spaces-router. Options include a LoadBalancer Service (recommended), Gateway API, or Ingress. See [Exposing Spaces externally][expose] for configuration and setup. - -[cert-manager]: https://cert-manager.io/ -[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/ -[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/ -[installation ideas]: https://cert-manager.io/docs/installation/ -[Issuer Configuration]: https://cert-manager.io/docs/configuration/ -[official-provider-families]: /manuals/packages/providers/provider-families -[aws-eks]: https://aws.amazon.com/eks/ -[google-cloud-gke]: https://cloud.google.com/kubernetes-engine -[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service -[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces -[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2 -[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3 -[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam -[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling -[aws-ec2-limits]: 
https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based -[upjet]: https://github.com/upbound/upjet -[expose]: /self-hosted-spaces/howtos/ingress/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/dr.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/dr.md deleted file mode 100644 index acfa67cf9..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/dr.md +++ /dev/null @@ -1,412 +0,0 @@ ---- -title: Disaster Recovery -sidebar_position: 13 -description: Configure Space-wide backups for disaster recovery. ---- - -:::info API Version Information -This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is an alpha feature enabled by default starting in v1.14.0. - -- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement) -- **v1.14.0+**: GA (enabled by default) - -For version-specific features and backup resources, see the sections below. For control-plane backups, see [Backup and Restore](./backup-and-restore.md). -::: - -:::important -For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default. - -To enable it on versions earlier than `v1.14.0`, set features.alpha.spaceBackup.enabled=true when you install Spaces. - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "features.alpha.spaceBackup.enabled=true" -``` -::: - -Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them in case of disaster recovery. - -This feature is for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to back up and restore their ControlPlanes. 
-## Benefits -The Space Backups feature provides the following benefits: - -* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead. -* Backup schedules. -* Selectors to specify resources to back up. - -## Prerequisites - -Enable the Space Backups feature in the Space: - -- Cloud Spaces: Not accessible to users. -- Connected Spaces: Space administrator must enable this feature. -- Disconnected Spaces: Space administrator must enable this feature. - -## Configure a Space Backup Config - -[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where to store the snapshot. - - -### Backup config provider - - -The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure: - -* The object storage provider -* The path to the provider -* The credentials needed to communicate with the provider - -You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers. - - -`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config. 
- - -#### AWS as a storage provider - -This example demonstrates how to use AWS as a storage provider for your backups: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupConfig -metadata: - name: default -spec: - objectStorage: - provider: AWS - bucket: spaces-backup-bucket - config: - endpoint: s3.eu-west-2.amazonaws.com - region: eu-west-2 - credentials: - source: Secret - secretRef: - name: bucket-creds - namespace: upbound-system - key: creds -``` - -This example assumes you've already created an S3 bucket called -`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket, -define the account credentials as a Secret in the specified Namespace -(`upbound-system` in this example). - -#### Azure as a storage provider - -This example demonstrates how to use Azure as a storage provider for your backups: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: Azure - bucket: upbound-backups - config: - storage_account: upbackupstore - container: upbound-backups - endpoint: blob.core.windows.net - credentials: - source: Secret - secretRef: - name: bucket-creds - namespace: upbound-system - key: creds -``` - - -This example assumes you've already created an Azure storage account called -`upbackupstore` and blob `upbound-backups`. To access the blob, -define the account credentials as a Secret in the specified Namespace -(`upbound-system` in this example). 
- - -#### GCP as a storage provider - -This example demonstrates how to use Google Cloud Storage as a storage provider for your backups: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: GCP - bucket: spaces-backup-bucket - credentials: - source: Secret - secretRef: - name: bucket-creds - namespace: upbound-system - key: creds -``` - - -This example assumes you've already created a Cloud bucket called -"spaces-backup-bucket" and a service account with access to this bucket. Define the key file as a Secret in the specified Namespace -(`upbound-system` in this example). - - -## Configure a Space Backup Schedule - - -[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space. - -Below is an example of a Space Backup Schedule running every day. It backs up all groups having `environment: production` labels and all control planes in those groups having `backup: please` labels. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupSchedule -metadata: - name: daily-schedule -spec: - schedule: "@daily" - configRef: - kind: SpaceBackupConfig - name: default - match: - groups: - labelSelectors: - - matchLabels: - environment: production - controlPlanes: - labelSelectors: - - matchLabels: - backup: please -``` - -### Define a schedule - -The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below: - -| Entry | Description | -| ----------------- | ------------------------------------------------------------------------------------------------- | -| `@hourly` | Run once an hour. | -| `@daily` | Run once a day. | -| `@weekly` | Run once a week. | -| `0 0/4 * * *` | Run every 4 hours. | -| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. | -| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. 
Hour is the largest measurement of time for @every. | - -### Suspend a schedule - -Use `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupSchedule -metadata: - name: daily-schedule -spec: - suspend: true -... -``` - -### Garbage collect backups when the schedule gets deleted - -Set the `spec.useOwnerReferencesInBackup` to garbage collect associated `SpaceBackup` when a `SpaceBackupSchedule` gets deleted. If set to true, backups are garbage collected when the schedule gets deleted. - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. - -The time to live is a duration, for example, `168h` for 7 days. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupSchedule -metadata: - name: daily-schedule -spec: - ttl: 168h # Backup is garbage collected after 7 days -... -``` - -## Selecting space resources to backup - -By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources. - -By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup. - -By setting `spec.exclude`, you can filter out some matched Space API resources from the backup. - -### Including space resources in a backup - -Different fields are available to include resources based on labels or names: -- `spec.match.groups` to include only some groups in the backup. -- `spec.match.controlPlanes` to include only some control planes in the backup. -- `spec.match.secrets` to include only some secrets in the backup. -- `spec.match.extras` to include only some extra resources in the backup. 
- -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - configRef: - kind: SpaceBackupConfig - name: default - match: - groups: - labelSelectors: - - matchLabels: - environment: production - controlPlanes: - labelSelectors: - - matchLabels: - backup: please - secrets: - names: - - my-secret - extras: - - apiGroup: "spaces.upbound.io" - kind: "SharedBackupConfig" - names: - - my-shared-backup -``` - -### Excluding Space resources from the backup - -Use the `spec.exclude` field to exclude matched Space API resources from the backup. - -Different fields are available to exclude resources based on labels or names: -- `spec.exclude.groups` to exclude some groups from the backup. -- `spec.exclude.controlPlanes` to exclude some control planes from the backup. -- `spec.exclude.secrets` to exclude some secrets from the backup. -- `spec.exclude.extras` to exclude some extra resources from the backup. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days - configRef: - kind: SpaceBackupConfig - name: default - match: - groups: - labelSelectors: - - matchLabels: - environment: production - exclude: - groups: - names: - - not-this-one-please -``` - -### Exclude resources in control planes' backups - -By default, it backs up all resources in a selected control plane. - -Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups. 
- -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days - configRef: - kind: SpaceBackupConfig - name: default - controlPlaneBackups: - excludedResources: - - secrets - - buckets.s3.aws.upbound.io -``` - -## Create a manual backup - -[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space. - -Below is an example of a manual SpaceBackup: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - configRef: - kind: SpaceBackupConfig - name: default - deletionPolicy: Delete -``` - - -The backup specification `DeletionPolicy` defines backup deletion actions, -including the deletion of the backup file from the bucket. The `Deletion Policy` -value defaults to `Orphan`. Set it to `Delete` to remove uploaded files -in the bucket. -For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation]. - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days -... -``` - -## Restore from a space backup - -Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without any side effects in case of failures. - -To restore a Space from an existing Space Backup, follow these steps: - -1. Install Spaces from scratch as needed. -2. 
Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`. -3. Select the backup you want to restore from, for example `my-backup`. -4. Run the following command to restore the Space: - -```shell -export SPACE_BACKUP_CONFIG=my-backup-config -export SPACE_BACKUP=my-backup -kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG -``` - -### Restore specific control planes - -:::important -This feature is available from Spaces v1.11. -::: - -Instead of restoring the whole Space, you can choose to restore specific control planes -from a backup using the `--controlplanes` flag. You can also use -the `--skip-space-restore` flag to skip restoring Space objects. -This allows Spaces admins to restore individual control planes without -needing to restore the entire Space. - -```shell -export SPACE_BACKUP_CONFIG=my-backup-config -export SPACE_BACKUP=my-backup -kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces --- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore -``` - - -[shared-backups]: /self-hosted-spaces/howtos/workload-id/backup-restore-config/ -[spacebackupconfig]: /self-hosted-spaces/reference/ -[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/ -[spacebackupschedule]: /self-hosted-spaces/reference/ -[cron-formatted]: https://en.wikipedia.org/wiki/Cron -[spacebackup]: /self-hosted-spaces/reference/ -[spaces-api-documentation]: /self-hosted-spaces/reference/ - diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/gitops.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/gitops.md deleted file mode 100644 index e0b462375..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/gitops.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: GitOps with ArgoCD in Self-Hosted Spaces 
-sidebar_position: 80 -description: Set up GitOps workflows with Argo CD in self-hosted Spaces -plan: "business" ---- - -:::info Deployment Model -This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/cloud-spaces/howtos/gitops-on-upbound/). -::: - -GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound. - - -## Integrate with Argo CD - - -[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for -GitOps. You can use it in tandem with Upbound control planes to achieve GitOps -flows. The sections below explain how to integrate these tools with Upbound. - -### Configure connection secrets for control planes - -You can configure control planes to write their connection details to a secret. -Do this by setting the -[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a -control plane manifest. For example: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: ctp1 - namespace: default -spec: - writeConnectionSecretToRef: - name: kubeconfig-ctp1 - namespace: default -``` - - -### Configure Argo CD - - -To configure Argo CD for Annotation resource tracking, edit the Argo CD -ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod: -annotation` to the data section as below. - -Next, configure the [auto respect RBAC for the Argo CD -controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD -attempts to discover some Kubernetes resource types that don't exist in a -control plane. You must configure Argo CD to respect the cluster's RBAC rules so -that Argo CD can sync. 
Add `resource.respectRBAC: normal` to the data section as -below. - -```bash -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-cm -data: - ... - application.resourceTrackingMethod: annotation - resource.respectRBAC: normal -``` - -:::tip -The `resource.respectRBAC` configuration above tells Argo to respect RBAC for -_all_ cluster contexts. If you're using an Argo CD instance to manage more than -control planes, change the `clusters` string match to apply only to control -planes. For example, if every control plane context name follows the -`controlplane-` naming convention, you should set the string match to be -`controlplane-*`. -::: - - -### Create a cluster context definition - - -Once the control plane is ready, extract the following values from the secret -containing the kubeconfig: - -```bash -kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d) -server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}') -bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}') -ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}') -``` - -Generate a new secret in the cluster where you installed Argo, using the prior -values extracted: - -```yaml -cat < - -## Enable Kubernetes Hub authorization - -To enable Kubernetes Hub Authentication in your Space, you need: -- A Kubernetes cluster with RBAC enabled -- `authorization.hubRBAC` set to `true` (enabled by default) - -Users can authenticate to the single-tenant Space with their Kubernetes credentials using this method. - -## Configure Kubernetes RBAC - -To configure Kubernetes RBAC in your Disconnected Space, you need to create `ClusterRoles` and `Roles` for defining access to your resources. 
For example: - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: controlplane-getter -rules: -- apiGroups: ["spaces.upbound.io"] - resources: ["controlplanes"] - verbs: ["get", "list", "watch"] -``` - -Next, create `ClusterRoleBindings` and `RoleBindings` to assign roles to subjects like users, groups, or service accounts: - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: controlplane-getters -subjects: -- kind: User - name: upbound:(user|robot): - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: controlplane-getter - apiGroup: rbac.authorization.k8s.io -``` - -The `subject` in this example can contain teams (`upbound:team:`) or org roles (`upbound:org-role:admin|member`) depending on your role needs. - -## Upbound RBAC integration - - - - -You can use the special verbs `admin`, `edit` and `view` for giving a subject access to a control plane: -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: controlplane-editor -rules: -- apiGroups: ["spaces.upbound.io"] - resources: ["controlplanes/k8s"] - verbs: ["edit"] # or "admin" or "view", depending on access level -``` - - - -[upbound-rbac]: /manuals/platform/rbac diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/ingress-nginx-migration.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/ingress-nginx-migration.md deleted file mode 100644 index be707aee0..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/ingress-nginx-migration.md +++ /dev/null @@ -1,764 +0,0 @@ ---- -title: Migrate away from ingress-nginx -sidebar_position: 7 -description: A guide on how to migrate from ingress-nginx -tier: "business" ---- - -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - -`ingress-nginx` is deprecated and will reach end-of-life in March 2026. 
This -guide covers migration options for existing Spaces deployments. - -For help choosing an exposure method, see [Exposing Spaces Externally][expose]. -Options vary by Spaces version. Select your Spaces version: - -* [Upgrading to Spaces 1.16+](#upgrading-to-spaces-116) -* [Staying on Spaces 1.15 or earlier](#migrate-while-on-spaces-115-or-earlier) - - -## Prerequisites - - -Set environment variables used throughout this guide: - -```bash -export SPACES_VERSION= # Example: 1.16.0 -export SPACES_ROUTER_HOST= # Example: proxy.example.com -``` - -Export your current Helm values to a file (or use an existing version-controlled -file): - -```bash -helm get values spaces -n upbound-system -o yaml > values.yaml -``` - -You'll merge new configuration into this file throughout the migration. - -## Upgrading to Spaces 1.16+ - -Choose your migration option: - -| Option | When to use | -|--------|-------------| -| [LoadBalancer Service](#loadbalancer-service-recommended) | Simplest setup, no additional components needed | -| [Gateway API](#gateway-api) | Already using Gateway API or need shared gateway | -| [Alternative ingress controller](#alternative-ingress-controller) | Already using Ingress, or need shared load balancer | - -All paths follow the same process: upgrade to 1.16+, switch exposure method, -then uninstall ingress-nginx. - - -### Upgrade to 1.16+ with Updated Ingress Values - -Spaces doesn't provision the Ingress resource by default and is now -controller-agnostic. 
- -Add the following to your `values.yaml` to keep ingress-nginx working: - -```yaml -ingress: - provision: true - host: proxy.example.com # Replace with your existing hostname - ingressClassName: nginx - annotations: - nginx.ingress.kubernetes.io/ssl-passthrough: "true" - nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - podLabels: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/component: controller - namespaceLabels: - kubernetes.io/metadata.name: ingress-nginx -``` - -Upgrade Spaces to 1.16+: - -```bash -helm upgrade spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version ${SPACES_VERSION} \ - --namespace upbound-system \ - -f values.yaml \ - --wait -``` - -Verify ingress-nginx is still working before you continue. - -### LoadBalancer Service (Recommended) - -This section describes how to expose the `spaces-router` with a LoadBalancer -Service. - -:::important -Use a Network Load Balancer (L4), not an Application Load Balancer (L7). Spaces -uses long-lived connections for watch traffic that L7 load balancers may -timeout. -::: - -**1. Add the LoadBalancer Service configuration to your `values.yaml`** - -Add the configuration for your cloud: - - - -```yaml -externalTLS: - host: proxy.example.com # Must match your current router hostname - -router: - proxy: - service: - type: LoadBalancer - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: external - service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip -``` - - - - - -```yaml -externalTLS: - host: proxy.example.com # Must match your current router hostname - -router: - proxy: - service: - type: LoadBalancer - annotations: - cloud.google.com/l4-rbs: enabled -``` - - - - - -```yaml -externalTLS: - host: proxy.example.com # Must match your current router hostname - -router: - proxy: - service: - type: LoadBalancer - annotations: {} -``` - - - -**2. 
Upgrade Spaces (Ingress stays running during transition)** - -```bash -helm upgrade spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version ${SPACES_VERSION} \ - --namespace upbound-system \ - -f values.yaml \ - --wait -``` - -**3. Get the new LoadBalancer address** - -```bash -kubectl get svc -n upbound-system spaces-router \ - -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' -``` - -**4. Validate before switching DNS** - -```bash -# Get spaces-router load balancer address -ROUTER_LB=$(kubectl get svc -n upbound-system spaces-router -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') - -# Test connectivity using --connect-to to route to the new LB -curl --connect-to "${SPACES_ROUTER_HOST}:443:${ROUTER_LB}:443" "https://${SPACES_ROUTER_HOST}/version" -# Expected: 401 Unauthorized (routing works, auth required) -``` - -**5. Update your DNS record to point to the new LoadBalancer address** - -**6. Update your `values.yaml` to disable Ingress, then upgrade Spaces** - -```yaml -ingress: - provision: false -``` - -```bash -helm upgrade spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version ${SPACES_VERSION} \ - --namespace upbound-system \ - -f values.yaml \ - --wait -``` - -**7. Uninstall ingress-nginx** - -```bash -helm uninstall ingress-nginx --namespace ingress-nginx -``` - -### Gateway API - -Spaces supports the [Gateway API][gateway-api] as an alternative to Ingress. Gateway API is the -Kubernetes standard for traffic routing going forward. - - -**1. Install Envoy Gateway** - -```bash -helm install eg oci://docker.io/envoyproxy/gateway-helm \ - --namespace envoy-gateway-system \ - --create-namespace \ - --wait -``` - -See [Envoy Gateway installation docs][envoy-install] for more detailed instructions. - -**2. Create EnvoyProxy and GatewayClass** - -Create an `EnvoyProxy` resource that the GatewayClass references. See the [Envoy -Gateway EnvoyProxy docs][envoy-proxy] for the full API and more examples. 
- - - -```bash -kubectl apply -f - < - - - -```bash -kubectl apply -f - < - - - -```bash -kubectl apply -f - < - -Then create the `GatewayClass` that references this EnvoyProxy: - -```bash -kubectl apply -f - --server-side < - annotations: - # Add your controller's TLS passthrough annotations - podLabels: - # Labels matching your controller's pods - namespaceLabels: - # Labels matching your controller's namespace -``` - -**3. Upgrade Spaces** - -```bash -helm upgrade spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version ${SPACES_VERSION} \ - --namespace upbound-system \ - -f values.yaml \ - --wait -``` - -**4. Get the new load balancer address** - -```bash -kubectl get svc -n -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' -``` - -**5. Validate before switching DNS** - -Test connectivity using `--connect-to` so traffic reaches the new controller -before you change DNS: - -```bash -CONTROLLER_LB=$(kubectl get svc -n -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') -curl --connect-to "${SPACES_ROUTER_HOST}:443:${CONTROLLER_LB}:443" "https://${SPACES_ROUTER_HOST}/version" -# Expected: 401 Unauthorized (routing works, auth required) -``` - -**6. Update your DNS record** to point to the new load balancer address. - -**7. Uninstall ingress-nginx** - -```bash -helm uninstall ingress-nginx --namespace ingress-nginx -``` - - -## Migrate while on Spaces 1.15 or earlier - - -Choose your migration option: - -| Option | When to use | -|--------|-------------| -| [Gateway API](#gateway-api-spaces-110) | Already using Gateway API or need shared gateway | -| [Traefik](#traefik-or-alternative-ingress-controller) | Migrate from nginx Ingress to alternative controller | - -Export your current Helm values to a file (or use your existing values file if -stored in Git): - -```bash -helm get values spaces -n upbound-system -o yaml > values.yaml -``` - - -### Gateway API (Spaces 1.10+) - - -Gateway API support has been available since Spaces 1.10. 
See [Gateway API -Configuration][gateway-api-config] for detailed setup instructions. - -:::note -Pre-1.16 Spaces doesn't support running Ingress and Gateway API -simultaneously. This migration requires switching over in a single upgrade, -which causes brief downtime during DNS propagation. -::: - - -**1. Install a gateway API controller** - -Install a Gateway API implementation that supports TLS passthrough and -`TLSRoute`. - -The following example uses Envoy Gateway: - -```bash -export ENVOY_GATEWAY_VERSION= # Example: v1.2.4 - -helm -n envoy-gateway-system upgrade --install --wait --wait-for-jobs \ - --timeout 300s --create-namespace envoy-gateway \ - oci://docker.io/envoyproxy/gateway-helm \ - --version "${ENVOY_GATEWAY_VERSION}" -``` - -**2. Create EnvoyProxy and GatewayClass** - -Create an `EnvoyProxy` resource, then the `GatewayClass` that references it. - - - -```bash -kubectl apply -f - < - - - -```bash -kubectl apply -f - < - - - -```bash -kubectl apply -f - < - -```bash -kubectl apply -f - --server-side < -:::tip -Configure Traefik's Service with NLB annotations. See -[Cloud-specific annotations][expose-annotate]. -::: - - -**2. Validate before switching DNS** - -```bash -# Get Traefik load balancer address -TRAEFIK_LB=$(kubectl get svc -n traefik traefik -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') - -# Test connectivity using --connect-to to route to Traefik -curl --connect-to "${SPACES_ROUTER_HOST}:443:${TRAEFIK_LB}:443" "https://${SPACES_ROUTER_HOST}/version" -# Expected: 401 Unauthorized (routing works, auth required) -``` - -**3. Update DNS to point to Traefik** - -```bash -kubectl get svc -n traefik traefik -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' -``` - -Update your DNS record to this address. For gradual migration, use weighted DNS routing. - -**4. 
Preserve the nginx IngressClass before uninstalling ingress-nginx** - -```bash -helm upgrade ingress-nginx ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --namespace ingress-nginx \ - --reuse-values \ - --set-json 'controller.ingressClassResource.annotations={"helm.sh/resource-policy": "keep"}' -``` - -**5. Uninstall ingress-nginx** - -```bash -helm uninstall ingress-nginx --namespace ingress-nginx -``` - -Keep `ingress.provision: true` so the Spaces chart continues to manage the -Ingress resource. Traefik picks it up via the nginx provider. - -## Verification - -After migration, verify connectivity: - -```bash -curl -v "https://${SPACES_ROUTER_HOST}/version" -# Expected: 401 Unauthorized -``` - -If you need to roll back, reinstall ingress-nginx, set `ingress.provision: true` -(and your previous ingress values) in Helm values, then upgrade Spaces again. - -[envoy-install]: https://gateway.envoyproxy.io/docs/install/ -[envoy-proxy]: https://gateway.envoyproxy.io/latest/tasks/operations/customize-envoyproxy/ -[spaces-install]: /self-hosted-spaces/howtos/self-hosted-spaces-deployment/ -[traefik-migrate]: https://doc.traefik.io/traefik/migrate/nginx-to-traefik/ -[spaces-deploy]: /self-hosted-spaces/howtos/self-hosted-spaces-deployment/ -[k8s-announce]: https://www.kubernetes.dev/blog/2025/11/12/ingress-nginx-retirement/ -[expose]: /self-hosted-spaces/howtos/ingress/ -[expose-annotate]: /self-hosted-spaces/howtos/ingress/#cloud-specific-annotations -[gateway-api]: https://gateway-api.sigs.k8s.io/ -[gateway-api-config]: /self-hosted-spaces/howtos/ingress/#gateway-api diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/ingress.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/ingress.md deleted file mode 100644 index e51bccb77..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/ingress.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -title: Exposing Spaces externally -sidebar_position: 5 -description: Options for 
exposing Spaces externally ---- - -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - -You can expose Spaces externally using one of three options: - -| Option | When to use | -|--------|-------------| -| LoadBalancer Service | Simplest setup, recommended for most deployments | -| Gateway API | Organization already using Gateway API, or need shared gateway across services | -| Ingress | Organization already using Ingress, or need shared load balancer across services | - -## LoadBalancer Service - - -Upbound recommends a LoadBalancer Service to expose `spaces-router`. - - -:::important -Use a Network Load Balancer (L4), not an Application Load Balancer (L7). Spaces -uses long-lived connections for watch traffic that L7 load balancers may -timeout. -::: - -### Configuration - -Add the following to your **Spaces Helm values** (or pass via `--set` when installing or upgrading). Choose the block for your cloud: - - - -```yaml -externalTLS: - host: proxy.example.com # Externally routable hostname for TLS certificates - -router: - proxy: - service: - type: LoadBalancer - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: external - service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip -``` - - - - - -```yaml -externalTLS: - host: proxy.example.com # Externally routable hostname for TLS certificates - -router: - proxy: - service: - type: LoadBalancer - annotations: {} # Azure uses L4 by default -``` - - - - - -```yaml -externalTLS: - host: proxy.example.com # Externally routable hostname for TLS certificates - -router: - proxy: - service: - type: LoadBalancer - annotations: - cloud.google.com/l4-rbs: enabled -``` - - - - -### Get the LoadBalancer address - - -After installation: - -```bash -kubectl get svc -n upbound-system spaces-router \ - -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' -``` - -Create or update a 
DNS record pointing your `externalTLS.host` to this address. - -## Ingress - -Use Ingress if you need to share a load balancer across multiple services or -have specific networking requirements. - -### Requirements - -- TLS passthrough support in your Ingress controller -- Network Load Balancer (L4) strongly recommended for long-lived connections - -Configure your Ingress controller's Service with [NLB annotations](#cloud-specific-annotations). - - -### Configuration - -Add the following to your **Spaces Helm values** (or pass via `--set` when installing or upgrading): - -```yaml -ingress: - provision: true - host: proxy.example.com - ingressClassName: "" - # Annotations to add to the Ingress resource - annotations: {} - # Pod labels of the Ingress controller - used for network policy - podLabels: {} - # Namespace labels of the Ingress controller - used for network policy - namespaceLabels: {} -``` - -### Traefik (with nginx provider) - -Traefik can use the [kubernetesIngressNGINX provider][traefik-provider] to -handle nginx-style Ingress resources with TLS passthrough. Add to your **Spaces Helm values**: - -```yaml -ingress: - provision: true - host: proxy.example.com - ingressClassName: nginx - annotations: - nginx.ingress.kubernetes.io/ssl-passthrough: "true" - nginx.ingress.kubernetes.io/force-ssl-redirect: "true" - podLabels: - app.kubernetes.io/name: traefik - namespaceLabels: - kubernetes.io/metadata.name: traefik -``` - -## Gateway API - -Spaces supports the [Gateway API][gateway-api-docs]. Use this option if your -organization is already using Gateway API or needs a shared gateway across -multiple services. 
- -### Requirements - -- A Gateway API controller (for example, Envoy Gateway, Cilium, or Traefik) -- Gateway API CRDs installed in your cluster -- TLS passthrough support -- Network Load Balancer (L4) strongly recommended - -For step-by-step setup including installing a controller (such as Envoy Gateway) and creating the GatewayClass, see [Migrate away from ingress-nginx - Gateway API][migration-gateway-api]. - -### Gateway API configuration - -Add the following to your **Spaces Helm values** (or use `--set` when upgrading). Disable Ingress when using Gateway API. - -```yaml -gatewayAPI: - host: proxy.example.com # Externally routable hostname; must match your DNS - gateway: - provision: true - name: spaces - className: spaces # Must match your GatewayClass name - spacesRouterRoute: - provision: true - podLabels: - app.kubernetes.io/name: envoy - app.kubernetes.io/component: proxy - app.kubernetes.io/managed-by: envoy-gateway - namespaceLabels: - kubernetes.io/metadata.name: envoy-gateway-system -``` - -The `podLabels` and `namespaceLabels` must match your Gateway API controller's pods and namespace (for NetworkPolicy). The example above uses Envoy Gateway; adjust for your controller. - -Install or upgrade Spaces with these values: - -```bash -helm upgrade spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --namespace upbound-system \ - -f values.yaml \ - --wait -``` - -### Get the gateway address - -After your controller has configured the Gateway: - -```bash -kubectl get gateway -n upbound-system spaces \ - -o jsonpath='{.status.addresses[0].value}' -``` - -Create or update a DNS record so that `gatewayAPI.host` points to this address. - -### Troubleshooting - -- **Gateway status:** Run `kubectl get gateway -n upbound-system spaces -o yaml`. Look for `Accepted` and `Programmed` in `status.conditions`. -- **TLSRoute status:** Run `kubectl get tlsroute -n upbound-system spaces-router -o yaml`. The route should show `Accepted: True`. 
- **Connectivity:** Run `curl -k "https://<gateway-address>/version"`. Expected response is `401 Unauthorized` (routing works, auth required).
`service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing`
`service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip` | -| **GCP** | `cloud.google.com/l4-rbs: enabled` | -| **Azure** | None required (L4 by default) | - - -[traefik-provider]: https://doc.traefik.io/traefik/reference/install-configuration/providers/kubernetes/kubernetes-ingress-nginx/ -[gateway-api-docs]: https://gateway-api.sigs.k8s.io/ -[migration-gateway-api]: /self-hosted-spaces/howtos/ingress-nginx-migration/#gateway-api diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/managed-service.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/managed-service.md deleted file mode 100644 index 8c71f77f4..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/managed-service.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Managed Upbound control planes -description: "Learn about the managed service capabilities of a Space" -sidebar_position: 10 ---- - -Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances -that Upbound manages for you. This means: - -- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance. -- scaling of the infrastructure. -- the maintenance of the core Upbound Crossplane components that make up a control plane. - -This lets users focus on building their APIs and operating their control planes, -while Upbound handles the rest. Each control plane has its own dedicated API -server connecting users to their control plane. - -## Learn about Upbound control planes - -Read the [concept][ctp-concept] documentation to learn about Upbound control planes. 
- -[uxp]: /manuals/uxp/overview -[ctp-concept]: /self-hosted-spaces/concepts/control-planes \ No newline at end of file diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/managed-spaces-deployment.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/managed-spaces-deployment.md deleted file mode 100644 index 65dd1c292..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/managed-spaces-deployment.md +++ /dev/null @@ -1,267 +0,0 @@ ---- -title: Managed Spaces -sidebar_position: 4 -description: A guide to Upbound Managed Spaces -plan: business -draft: true ---- - - - -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - -:::important -This feature is only available for select Business Critical customers. You can't -set up your own Managed Space without the assistance of Upbound. If you're -interested in this deployment mode, please [contact us][contact]. -::: - - - -A Managed Space deployed on AWS is a single-tenant deployment of a control plane -space in your AWS organization in an isolated sub-account. With Managed Spaces, -you can use the same API, CLI, and Console that Upbound offers, with the benefit -of running entirely in a cloud account that you own and Upbound manages for you. - -The following guide walks you through setting up a Managed Space in your AWS -organization. If you have any questions while working through this guide, -contact your Upbound Account Representative for help. - - - - - -A Managed Space deployed on GCP is a single-tenant deployment of a control plane -space in your GCP organization in an isolated project. With Managed Spaces, you -can use the same API, CLI, and Console that Upbound offers, with the benefit of -running entirely in a cloud account that you own and Upbound manages for you. - -The following guide walks you through setting up a Managed Space in your GCP -organization. 
If you have any questions while working through this guide, -contact your Upbound Account Representative for help. - - - - -## Managed Space on your cloud architecture - - - -A Managed Space is a deployment of the Upbound Spaces software inside an -Upbound-controlled sub-account in your AWS cloud environment. The Spaces -software runs in this sub-account, orchestrated by Kubernetes. Backups and -billing data get stored inside bucket or blob storage in the same sub-account. -The control planes deployed and controlled by the Spaces software runs on the -Kubernetes cluster which gets deployed into the sub-account. - -The diagram below illustrates the high-level architecture of Upbound Managed Spaces: - -![Upbound Managed Spaces arch](/img/managed-arch-aws.png) - -The Spaces software gets deployed on an EKS Cluster in the region of your -choice. This EKS cluster is where your control planes are ultimately run. -Upbound also deploys buckets, 1 for the collection of the billing data and 1 for -control plane backups. - -Upbound doesn't have access to other sub-accounts nor your organization-level -settings in your cloud environment. Outside of your cloud organization, Upbound -runs the Upbound Console, which includes the Upbound API and web application, -including the dashboard you see at `console.upbound.io`. By default, all -connections are encrypted, but public. Optionally, you also have the option to -use private network connectivity through [AWS PrivateLink][aws-privatelink]. - - - - - - -A Managed Space is a deployment of the Upbound Spaces software inside an -Upbound-controlled project in your GCP cloud environment. The Spaces software -runs in this project, orchestrated by Kubernetes. Backups and billing data get -stored inside bucket or blob storage in the same project. The control planes -deployed and controlled by the Spaces software runs on the Kubernetes cluster -which gets deployed into the project. 
- -The diagram below illustrates the high-level architecture of Upbound Managed Spaces: - -![Upbound Managed Spaces arch](/img/managed-arch-gcp.png) - -The Spaces software gets deployed on a GKE Cluster in the region of your choice. -This GKE cluster is where your control planes are ultimately run. Upbound also -deploys cloud buckets, 1 for the collection of the billing data and 1 for -control plane backups. - -Upbound doesn't have access to other projects nor your organization-level -settings in your cloud environment. Outside of your cloud organization, Upbound -runs the Upbound Console, which includes the Upbound API and web application, -including the dashboard you see at `console.upbound.io`. By default, all -connections are encrypted, but public. Optionally, you also have the option to -use private network connectivity through [GCP Private Service -Connect][gcp-private-service-connect]. - - - -## Prerequisites - -- An organization created on Upbound - - - -- You should have a preexisting AWS organization to complete this guide. -- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS. - -After the sub-account information gets provided to Upbound, **don't change it -any further.** Any changes made to the sub-account or the resources created by -Upbound for the purposes of the Managed Space deployments voids the SLA you have -with Upbound. If you want to make configuration changes, contact your Upbound -Solutions Architect. - - - - - -- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide. -- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP. 
- -After the project information gets provided to Upbound, **don't change it any -further.** Any changes made to the project or the resources created by Upbound -for the purposes of the Managed Space deployments voids the SLA you have with -Upbound. If you want to make configuration changes, contact your Upbound -Solutions Architect. - - - - - -## Set up cross-account management - -Upbound supports using AWS Key Management Service with cross-account IAM -permissions. This enables the isolation of keys so the infrastructure operated -by Upbound has limited access to symmetric keys. - -In the KMS key's account, apply the baseline key policy: - -```json -{ - "Sid": "Allow Upbound to use this key", - "Effect": "Allow", - "Principal": { - "AWS": ["[Managed Space sub-account ID]"] - }, - "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"], - "Resource": "*" -} -``` - -You need another key policy to let the sub-account create persistent resources -with the KMS key: - -```json -{ - "Sid": "Allow attachment of persistent resources for an Upbound Managed Space", - "Effect": "Allow", - "Principal": { - "AWS": "[Managed Space sub-account ID]" - }, - "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"], - "Resource": "*", - "Condition": { - "Bool": { - "kms:GrantIsForAWSResource": "true" - } - } -} -``` - -### Configure PrivateLink - -By default, all connections to the Upbound Console are encrypted, but public. -AWS PrivateLink is a feature that allows VPC peering whereby your traffic -doesn't traverse the public internet. To have this configured, contact your -Upbound Account Representative. - - - - - -## Enable APIs - -Enable the following APIs in the new project: - -- Kubernetes Engine API -- Cloud Resource Manager API -- Compute Engine API -- Cloud DNS API - -:::tip -Read how to enable APIs in a GCP project [here][here]. -::: - -## Create a service account - -Create a service account in the new project. 
Name the service account, -upbound-sa. Give the service account the following roles: - -- Compute Admin -- Project IAM Admin -- Service Account Admin -- DNS Administrator -- Editor - -Select the service account you just created. Select keys. Add a new key and -select JSON. The key gets downloaded to your machine. Save this for later. - -## Create a DNS Zone - -Create a DNS Zone, set the **Zone type** to `Public`. - -### Configure Private Service Connect - -By default, all connections to the Upbound Console are encrypted, but public. -GCP Private Service Connect is a feature that allows VPC peering whereby your -traffic doesn't traverse the public internet. To have this configured, contact -your Upbound Account Representative. - - - -## Provide information to Upbound - -Once these policies get attached to the key, tell your Upbound Account -Representative, providing them the following: - - - -- the full ARN of the KMS key. -- the name of the organization that you created in Upbound. Use the up CLI command, `up org list`, so see this information. -- Confirmation of which region in AWS you want the deployment to target. - - - - - -- The service account JSON key -- The NS records associated with the DNS name created in the last step. -- the name of the organization that you created in Upbound. Use the up CLI command, `up org list`, so see this information. -- Confirmation of which region in GCP you want the deployment to target. - - - -Once Upbound has this information, the request gets processed in a business day. - -## Use your Managed Space - -Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io]. 
- - - - -[contact]: https://www.upbound.io/contact-us -[aws-privatelink]: #configure-privatelink -[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new -[gcp-private-service-connect]: #configure-private-service-connect -[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization -[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis -[console-upbound-io]: https://console.upbound.io/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/mcp-connector-guide.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/mcp-connector-guide.md deleted file mode 100644 index 2f18937b1..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/mcp-connector-guide.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: Consume control plane APIs in an app cluster with control plane connector -sidebar_position: 99 -description: A tutorial to configure a Space with Argo to declaratively create and - manage control planes ---- - -In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space. - - -The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters---running outside of Upbound--to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane. - -## Prerequisites - -To complete this tutorial, you need the following: - -- Have already deployed an Upbound Space. -- Have already deployed an Kubernetes cluster (referred to as `app cluster`). 
- -## Create a control plane - -Create a new control plane in your self-hosted Space. Run the following command in a terminal: - -```bash -up ctp create my-control-plane -``` - -Once the control plane is ready, connect to it. - -```bash -up ctp connect my-control-plane -``` - -For convenience, install a an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions. - -```bash -up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0 -``` - -## Fetch the control plane's connection details - -Run the following command in a terminal: - -```shell -kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml -``` - -This command saves the kubeconfig for the control plane to a file in your working directory. - -## Install control plane connector in your app cluster - -Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal: - -:::important -Run the following commands against your **app cluster**, not your control plane. -::: - -```bash -kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml -``` - -Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to. - -```shell -export CONNECTOR_CTP_NAMESPACE=app-cluster-1 -``` - -Install the Control Plane Connector in the app cluster and point it to your control plane. 
- -```bash -up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane -``` - -## Inspect your app cluster - -After you install Control Plane Connector in the app cluster, you can now see APIs which live on the control plane. You can confirm this is the case by running the following command on your app cluster: - -```bash {copy-lines="1"} -kubectl api-resources | grep upbound - -# The output should look like this: -sqlinstances aws.platform.upbound.io/v1alpha1 true SQLInstance -clusters aws.platformref.upbound.io/v1alpha1 true Cluster -osss observe.platform.upbound.io/v1alpha1 true Oss -apps platform.upbound.io/v1alpha1 true App -``` - -## Claim a database instance on your app cluster - -Create a database claim against the `SQLInstance` API and observe resources get created by your control plane. Apply the following resources to your app cluster: - -```yaml -cat < --output - ``` - - The command exports your existing Crossplane control plane configuration/state into an archive file. - -::: note -By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file. - -This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready. -::: - -2. Use the control plane [create command][create-command] to create a managed -control plane in Upbound: - - ```bash - up controlplane create my-controlplane - ``` - -3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step: - - ```bash - up ctx "///my-controlplane" - ``` - - The command configures your local `kubeconfig` to connect to the control plane. - -4. 
Run the following command to import the archive file into the control plane: - - ```bash - up controlplane migration import --input - ``` - -:::note -By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed -resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane. -Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after -importing the archive file. -::: - - - -5. Review and validate the imported configuration/state. When you are ready, activate your managed - control plane by running the following command: - - ```bash - kubectl annotate managed --all crossplane.io/paused- - ``` - - At this point, you can delete the source Crossplane control plane. - -## CLI options - -### Filtering - -The migration tool captures the state of a Control Plane. The only filtering -supported is Kubernetes namespace and Kubernetes resource Type filtering. - -You can exclude namespaces using the `--exclude-namespaces` CLI option. This can prevent the CLI from including unwanted resources in the export. - -```bash ---exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,... - -# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public','kube-node-lease', and 'local-path-storage'. -``` - -You can exclude Kubernetes Resource types by using the `--exclude-resources` CLI option: - -```bash ---exclude-resources=EXCLUDE-RESOURCES,... - -# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default. 
-``` - -For example, here's an example for excluding the CRDs installed by Crossplane functions (since they're not needed): - -```bash -up controlplane migration export \ - --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io -``` - -:::warning -You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported. -::: - - -:::tip Function Input CRDs - -Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored. - -::: - - -After export, users can also change the archive file to only include necessary resources. - -### Export non-Crossplane resources - -Use the `--include-extra-resources=` CLI option to select other CRD types to include in the export. - -### Set the kubecontext - -Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example: - -```bash -up controlplane migration export --kubeconfig -``` - -Use this in tandem with `up ctx` to export a control plane's kubeconfig: - -```bash -up ctx --kubeconfig ~/.kube/config - -# To list the current contet -up ctx . --kubeconfig ~/.kube/config -``` - -## Export archive - -The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file: - -
- -View the example export - -```bash -$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes -Exporting control plane state... -✓ Scanning control plane for types to export... 121 types found! 👀 -✓ Exporting 121 Crossplane resources...60 resources exported! 📤 -✓ Exporting 3 native resources...8 resources exported! 📤 -✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦 -``` - -
- - -When an export occurs, a file named `xp-state.tar.gz` by default gets created in the working directory. You can unzip the file and all the contents of the export are all text YAML files. - -- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory -which contains: - - A `metadata.yaml` file that contains Kubernetes Object Metadata - - A list of Kubernetes Categories the resource belongs to -- A `cluster` directory that contains YAML manifests for all resources provisioned -using the CRD. - -Sample contents for a Cluster with a single `XNetwork` Composite from -[configuration-aws-network][configuration-aws-network] is show below: - - -
- -View the example cluster content - -```bash -├── compositionrevisions.apiextensions.crossplane.io -│ ├── cluster -│ │ ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml -│ │ └── xnetworks.aws.platform.upbound.io-9859a34.yaml -│ └── metadata.yaml -├── configurations.pkg.crossplane.io -│ ├── cluster -│ │ └── configuration-aws-network.yaml -│ └── metadata.yaml -├── deploymentruntimeconfigs.pkg.crossplane.io -│ ├── cluster -│ │ └── default.yaml -│ └── metadata.yaml -├── export.yaml -├── functions.pkg.crossplane.io -│ ├── cluster -│ │ ├── crossplane-contrib-function-auto-ready.yaml -│ │ ├── crossplane-contrib-function-go-templating.yaml -│ │ └── crossplane-contrib-function-kcl.yaml -│ └── metadata.yaml -├── internetgateways.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-xgl4q.yaml -│ └── metadata.yaml -├── mainroutetableassociations.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-t2qh7.yaml -│ └── metadata.yaml -├── namespaces -│ └── cluster -│ ├── crossplane-system.yaml -│ ├── default.yaml -│ └── upbound-system.yaml -├── providerconfigs.aws.upbound.io -│ ├── cluster -│ │ └── default.yaml -│ └── metadata.yaml -├── providerconfigusages.aws.upbound.io -│ ├── cluster -│ │ ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml -...redacted -│ │ └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml -│ └── metadata.yaml -├── providers.pkg.crossplane.io -│ ├── cluster -│ │ ├── upbound-provider-aws-ec2.yaml -│ │ └── upbound-provider-family-aws.yaml -│ └── metadata.yaml -├── routes.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-dt9cj.yaml -│ └── metadata.yaml -├── routetableassociations.ec2.aws.upbound.io -│ ├── cluster -│ │ ├── borrelli-backup-test-mr2sd.yaml -│ │ ├── borrelli-backup-test-ngq5h.yaml -│ │ ├── borrelli-backup-test-nrkgg.yaml -│ │ └── borrelli-backup-test-wq752.yaml -│ └── metadata.yaml -├── routetables.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-dv4mb.yaml -│ └── metadata.yaml -├── secrets -│ └── 
namespaces -│ ├── crossplane-system -│ │ ├── cert-token-signing-gateway-pub.yaml -│ │ ├── mxp-hostcluster-certs.yaml -│ │ ├── package-pull-secret.yaml -│ │ └── xgql-tls.yaml -│ └── upbound-system -│ └── aws-creds.yaml -├── securitygrouprules.ec2.aws.upbound.io -│ ├── cluster -│ │ ├── borrelli-backup-test-472f4.yaml -│ │ └── borrelli-backup-test-qftmw.yaml -│ └── metadata.yaml -├── securitygroups.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-w5jch.yaml -│ └── metadata.yaml -├── storeconfigs.secrets.crossplane.io -│ ├── cluster -│ │ └── default.yaml -│ └── metadata.yaml -├── subnets.ec2.aws.upbound.io -│ ├── cluster -│ │ ├── borrelli-backup-test-8btj6.yaml -│ │ ├── borrelli-backup-test-gbmrm.yaml -│ │ ├── borrelli-backup-test-m7kh7.yaml -│ │ └── borrelli-backup-test-nttt5.yaml -│ └── metadata.yaml -├── vpcs.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-7hwgh.yaml -│ └── metadata.yaml -└── xnetworks.aws.platform.upbound.io -├── cluster -│ └── borrelli-backup-test.yaml -└── metadata.yaml -43 directories, 87 files -``` - -
- - -The `export.yaml` file contains metadata about the export, including the configuration of the export, Crossplane information, and what's included in the export bundle. - -
- -View the export - -```yaml -version: v1alpha1 -exportedAt: 2025-01-06T17:39:53.173222Z -options: - excludedNamespaces: - - kube-system - - kube-public - - kube-node-lease - - local-path-storage - includedResources: - - namespaces - - configmaps - - secrets - excludedResources: - - gotemplates.gotemplating.fn.crossplane.io - - kclinputs.template.fn.crossplane.io -crossplane: - distribution: universal-crossplane - namespace: crossplane-system - version: 1.17.3-up.1 - featureFlags: - - --enable-provider-identity - - --enable-environment-configs - - --enable-composition-functions - - --enable-usages -stats: - total: 68 - nativeResources: - configmaps: 0 - namespaces: 3 - secrets: 5 - customResources: - amicopies.ec2.aws.upbound.io: 0 - amilaunchpermissions.ec2.aws.upbound.io: 0 - amis.ec2.aws.upbound.io: 0 - availabilityzonegroups.ec2.aws.upbound.io: 0 - capacityreservations.ec2.aws.upbound.io: 0 - carriergateways.ec2.aws.upbound.io: 0 - compositeresourcedefinitions.apiextensions.crossplane.io: 0 - compositionrevisions.apiextensions.crossplane.io: 2 - compositions.apiextensions.crossplane.io: 0 - configurationrevisions.pkg.crossplane.io: 0 - configurations.pkg.crossplane.io: 1 -...redacted -``` - -
- -### Skipped resources - -Along with to the resources excluded via CLI options, the following resources aren't -included in the backup: - -- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific -- Resources directly managed via Helm (ArgoCD's helm implementation, which templates -Helm resources and then applies them, get included in the backup). The migration creates the exclusion list by looking for: - - Any Resource with the label `"app.kubernetes.io/managed-by" == "Helm"` - - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1` -- Resources installed via a Crossplane package. These have an `ownerReference` with -a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources. -- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the -export. - -## Restore - -The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state. - -
- -View the migration import - -```bash -$ up controlplane migration import -Importing control plane state... -✓ Reading state from the archive... Done! 👀 -✓ Importing base resources... 18 resources imported! 📥 -✓ Waiting for XRDs... Established! ⏳ -✓ Waiting for Packages... Installed and Healthy! ⏳ -✓ Importing remaining resources... 50 resources imported! 📥 -✓ Finalizing import... Done! 🎉 -``` - -
- -Your scenario may involve migrating resources which already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If the resource already exists, the restore sets fields to what's in the backup. - -The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got -paused during backup, or remove the annotation manually. - -### Restore order - -The importer restores based on Kubernetes types. The restore order doesn't include parent/child relationships. - -Because Crossplane Composites create new Managed Resources if not present on the cluster, all -Claims, Composites and Managed Resources get imported in a paused state. You can un-pause them after the restore completes. - -The first step of import is installing Base Resources into the cluster. These resources (such has -packages and XRDs) must be ready before proceeding with the import. -Base Resources are: - -- Kubernetes Resources - - ConfigMaps - - Namespaces - - Secrets -- Crossplane Resources - - ControllerConfigs: `controllerconfigs.pkg.crossplane.io` - - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io` - - StoreConfigs: `storeconfigs.secrets.crossplane.io` -- Crossplane Packages - - Providers: `providers.pkg.crossplane.io` - - Functions: `functions.pkg.crossplane.io` - - Configurations: `configurations.pkg.crossplane.io` - -Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present. - -During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims -and Composites. 
- -To manually un-pause managed resources after an import, remove the annotation by running: - -```bash -kubectl annotate managed --all crossplane.io/paused- -``` - -You can also run import again with the `--unpause-after-import` flag to remove the annotations. - -```bash -up controlplane migration import --unpause-after-import -``` - -### Restoring resource status - -The importer applies the status of all resources during import. The importer determines if the CRD version has a status field defined based on the stored CRD version. - - -[cli-command]: /reference/cli-reference -[up-cli]: /reference/cli-reference -[up-cli-1]: /manuals/cli/overview -[create-command]: /reference/cli-reference -[up-ctx]: /reference/cli-reference -[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/mirror-images.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/mirror-images.md deleted file mode 100644 index c19902f1d..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/mirror-images.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Mirror Spaces images -description: Mirror OCI artifacts for a Spaces version to local storage or a - private registry. ---- - -`up space mirror` copies all OCI artifacts for a given Spaces version to a local -directory (as `.tar.gz` files) or a container registry. - -For full usage, flags, and examples, see the [up space mirror][cli-mirror] -section of the CLI reference. - -## Prerequisites - -* The [Up CLI][cli-reference] installed -* An Upbound token file (JSON with `accessId` and `token`) for registry auth. 
- Example: - - ```json - { - "accessId": "", - "token": "" - } - ``` - -## Mirror to a local directory - -To export artifacts as `.tar.gz` files into a directory: - -```bash -up space mirror -v --output-dir= --token-file= -``` - -Example: - -```bash -up space mirror -v 1.15.2 --output-dir=/tmp/spaces-artifacts --token-file=upbound-token.json -``` - -## Specify your internal registry - -When you install Upbound with Helm in a proxied environment, please update the specified registry with your internal registry. - - -```bash -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "authentication.hubIdentities=true" \ - --set "authorization.hubRBAC=true" \ - --set "registry=registry.company.corp/spaces" \ - --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \ - --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \ - --wait -``` - - -## Mirror to a private registry - -To push artifacts to your own container registry: - -1. Log in to the registry (for example, `docker login myregistry.io`). - -2. Run the mirror command with `--destination-registry`: - -```bash -up space mirror -v --destination-registry= --token-file= -``` - -Example: - -```bash -up space mirror -v 1.15.2 --destination-registry=myregistry.io --token-file=upbound-token.json -``` - -:::tip -Use `--dry-run` to list artifacts the command would mirror without copying them. This verifies you have proper access to the Upbound registry. 
-::: - -[cli-mirror]: /reference/cli-reference#up-space-mirror -[cli-reference]: /reference/cli-reference diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/observability.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/observability.md deleted file mode 100644 index e627954d5..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/observability.md +++ /dev/null @@ -1,329 +0,0 @@ ---- -title: Observability -sidebar_position: 50 -description: A guide for how to use the integrated observability pipeline feature - in a Space. -plan: "enterprise" ---- - - - -This guide explains how to configure control plane observability in Upbound -Spaces. Upbound provides integrated observability features built on -[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics, -and traces from workloads running within individual control planes. - - - -:::tip -For self-hosted Space administrators who want to observe the cluster -infrastructure, see the -[Space-level observability guide][space-level-o11y]. That guide covers -infrastructure metrics, router metrics, and [distributed tracing](/self-hosted-spaces/howtos/tracing/overview). -::: - - -## Prerequisites - - -**Control plane observability** is enabled by default. No additional setup is -required. - - - -### Self-hosted Spaces - -1. **Enable the observability feature** when installing Spaces: - ```bash - up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "observability.enabled=true" - ``` - -Set `features.alpha.observability.enabled=true` instead if using Spaces version -before `v1.14.0`. - -## Control plane observability - -Control plane observability collects telemetry data from workloads running -within individual control planes using `SharedTelemetryConfig` resources. - -The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per -control plane, defined by a `SharedTelemetryConfig` at the group level. 
-Collectors pass data to external observability backends. - -:::important -From Spaces `v1.13` and beyond, telemetry only includes user-facing control -plane workloads (Crossplane, providers, functions). - -Self-hosted users can include system workloads (`api-server`, `etcd`) by setting -`observability.collectors.includeSystemTelemetry=true` in Helm. -::: - -:::important -Spaces validates `SharedTelemetryConfig` resources before applying them by -sending telemetry to configured exporters. For self-hosted Spaces, ensure that -`spaces-controller` can reach the exporter endpoints. -::: - -### `SharedTelemetryConfig` - -`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry -configuration for control planes. - -#### New Relic example - -```yaml -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: newrelic - namespace: default -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - otlphttp: - endpoint: https://otlp.nr-data.net - headers: - api-key: YOUR_API_KEY - exportPipeline: - metrics: [otlphttp] - traces: [otlphttp] - logs: [otlphttp] -``` - -#### Datadog Example - -```yaml -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: datadog - namespace: default -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - datadog: - api: - site: ${DATADOG_SITE} - key: ${DATADOG_API_KEY} - exportPipeline: - metrics: [datadog] - traces: [datadog] - logs: [datadog] -``` - - - -#### Splunk HEC Example - - - -```yaml -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: splunk - namespace: default -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - splunk_hec: - endpoint: https://splunk.example.com:8088/services/collector - token: ${SPLUNK_HEC_TOKEN} - exportPipeline: - metrics: [splunk_hec] - traces: 
[splunk_hec] - logs: [splunk_hec] -``` - -### Control plane selection - -Use `spec.controlPlaneSelector` to specify which control planes should use the -telemetry configuration. - -#### Label-based selection - -```yaml -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -#### Expression-based selection - -```yaml -spec: - controlPlaneSelector: - labelSelectors: - - matchExpressions: - - { key: environment, operator: In, values: [production,staging] } -``` - -#### Name-based selection - -```yaml -spec: - controlPlaneSelector: - names: - - controlplane-dev - - controlplane-staging - - controlplane-prod -``` - -### Manage sensitive data - -:::important -Available from Spaces `v1.10` -::: - -Store sensitive data in Kubernetes secrets and reference them in your -`SharedTelemetryConfig`: - -1. **Create the secret:** - ```bash - kubectl create secret generic sensitive -n \ - --from-literal=apiKey='YOUR_API_KEY' - ``` - -2. **Reference in SharedTelemetryConfig:** - ```yaml - apiVersion: observability.spaces.upbound.io/v1alpha1 - kind: SharedTelemetryConfig - metadata: - name: newrelic - spec: - configPatchSecretRefs: - - name: sensitive - key: apiKey - path: exporters.otlphttp.headers.api-key - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - otlphttp: - endpoint: https://otlp.nr-data.net - headers: - api-key: dummy # Replaced by secret value - exportPipeline: - metrics: [otlphttp] - traces: [otlphttp] - logs: [otlphttp] - ``` - -### Telemetry processing - -:::important -Available from Spaces `v1.11` -::: - -Configure processing pipelines to transform telemetry data using the [transform -processor][transform-processor]. 
- -#### Add labels to metrics - -```yaml -spec: - processors: - transform: - error_mode: ignore - metric_statements: - - context: datapoint - statements: - - set(attributes["newLabel"], "someLabel") - processorPipeline: - metrics: [transform] -``` - -#### Remove labels - -From metrics: -```yaml -processors: - transform: - metric_statements: - - context: datapoint - statements: - - delete_key(attributes, "kubernetes_namespace") -``` - -From logs: -```yaml -processors: - transform: - log_statements: - - context: log - statements: - - delete_key(attributes, "log.file.name") -``` - -#### Modify log messages - -```yaml -processors: - transform: - log_statements: - - context: log - statements: - - set(attributes["original"], body) - - set(body, Concat(["log message:", body], " ")) -``` - -### Monitor status - -Check the status of your `SharedTelemetryConfig`: - -```bash -kubectl get stc -NAME SELECTED FAILED PROVISIONED AGE -datadog 1 0 1 63s -``` - -- `SELECTED`: Number of control planes selected -- `FAILED`: Number of control planes that failed provisioning -- `PROVISIONED`: Number of successfully running collectors - -For detailed status information: - -```bash -kubectl describe stc -``` - -## Supported exporters - -Upbound Spaces supports the following exporters: -- `datadog` - For Datadog integration -- `otlphttp` - General-purpose exporter (used by New Relic, among others) - -- `splunk_hec` - For Splunk HTTP Event Collector integration - -- `debug` - For troubleshooting - -## Considerations - -- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict. -- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group. 
- -For more advanced configuration options, review the [Helm chart -reference][helm-chart-reference] and [OpenTelemetry Transformation Language -documentation][opentelemetry-transformation-language]. - - -[opentelemetry]: https://opentelemetry.io/ -[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/ -[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md -[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl -[space-level-o11y]: /self-hosted-spaces/howtos/space-observability -[helm-chart-reference]: /reference/spaces-helm-reference/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/oidc-configuration.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/oidc-configuration.md deleted file mode 100644 index 1c03f418f..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/oidc-configuration.md +++ /dev/null @@ -1,284 +0,0 @@ ---- -title: Configure OIDC -sidebar_position: 20 -description: Configure OIDC in your Space ---- -:::important -This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac]. -::: - -Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this -configuration as a `ConfigMap` and authenticates with the Upbound router -component during installation with Helm. - -This guide walks you through how to create and apply an authentication -configuration to validate Upbound with an external identity provider. Each -section focuses on a specific part of the configuration file. - - -## Creating the `AuthenticationConfiguration` file - -First, create a file called `config.yaml` with an `AuthenticationConfiguration` -kind. 
The `AuthenticationConfiguration` is the initial authentication structure -necessary for Upbound to communicate with your chosen identity provider. - -```yaml -apiVersion: apiserver.config.k8s.io/v1beta1 -kind: AuthenticationConfiguration -jwt: -- issuer: - url: oidc-issuer-url - audiences: - - oidc-client-id - claimMappings: # optional - username: - claim: oidc-username-claim - prefix: oidc-username-prefix - groups: - claim: oidc-groups-claim - prefix: oidc-groups-prefix -``` - - -For detailed configuration options, including the CEL-based token validation, -review the feature [documentation][structured-auth-config]. - - -The `AuthenticationConfiguration` allows you to configure multiple JWT -authenticators as separate issuers. - -### Configure an issuer - -The `jwt` array requires an `issuer` specification and typically contains: - -- A `username` claim mapping -- A `groups` claim mapping -Optionally, the configuration may also include: -- A set of claim validation rules -- A set of user validation rules - -The `issuer` URL must be unique across all configured authenticators. - -```yaml -issuer: - url: https://example.com - discoveryUrl: https://discovery.example.com/.well-known/openid-configuration - certificateAuthority: |- - - audiences: - - client-id-a - - client-id-b - audienceMatchPolicy: MatchAny -``` - -By default, the authenticator assumes the OIDC Discovery URL is -`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow -this structure, and you can omit the `discoveryUrl` field. To use a separate -discovery service, specify the full path to the discovery endpoint in this -field. - -If the CA for the Issuer isn't public, provide the PEM encoded CA for the Discovery URL. - -At least one of the `audiences` entries must match the `aud` claim in the JWT. -For OIDC tokens, this is the Client ID of the application attempting to access -the Upbound API. 
Having multiple values set allows the same configuration to -apply to multiple client applications, for example the `kubectl` CLI and an -Internal Developer Portal. - -If you specify multiple `audiences` , `audienceMatchPolicy` must equal `MatchAny`. - -### Configure `claimMappings` - -#### Username claim mapping - -By default, the authenticator uses the `sub` claim as the user name. To override this, either: - -- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string. -or - -- specify a CEL `expression` to calculate the user name. - -```yaml -claimMappings: - username: - claim: "sub" - prefix: "keycloak" - # - expression: 'claims.username + ":external-user"' -``` - - -#### Groups claim mapping - -By default, this configuration doesn't map groups, unless you either: - -- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string. -or - -- specify a CEL `expression` that returns a string or list of strings. - - -```yaml -claimMappings: - groups: - claim: "groups" - prefix: "" - # - expression: 'claims.roles.split(",")' -``` - - -### Validation rules - - -Validation rules are outside the scope of this document. Review the -[documentation][structured-auth-config] for more information. 
Examples include -using CEL expressions to validate authentication such as: - - -- Validating that a token claim has a specific value -- Validating that a token has a limited lifetime -- Ensuring usernames and groups don't contain reserved prefixes - -## Required claims - -To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following: - -| Upbound.io Audience | Notes | -| -------------------------------------------------------- | -------------------------------------------------------------------- | -| `[]` | No Access to Space-level or ControlPlane APIs | -| `['upbound:spaces:api']` | This Identity is only for Space-level APIs | -| `['upbound:spaces:controlplanes']` | This Identity is only for ControlPlane APIs | -| `['upbound:spaces:api', 'upbound:spaces:controlplanes']` | This Identity is for both Space-level and ControlPlane APIs | - - -You can set this claim in two ways: - -- In the identity provider mapped in the ID token. -- Inject in the authenticator with the `jwt.claimMappings.extra` array. - -For example: -```yaml -apiVersion: apiserver.config.k8s.io/v1beta1 -kind: AuthenticationConfiguration -jwt: -- issuer: - url: https://keycloak:8443/realms/master - certificateAuthority: |- - - audiences: - - master-realm - audienceMatchPolicy: MatchAny - claimMappings: - username: - claim: "preferred_username" - prefix: "keycloak:" - groups: - claim: "groups" - prefix: "" - extra: - - key: 'upbound.io/aud' - valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']" -``` - -## Install the `AuthenticationConfiguration` - -Once you create an `AuthenticationConfiguration` file, specify this file as a -`ConfigMap` in the host cluster for the Upbound Space. 
- -```sh -kubectl create configmap -n upbound-system --from-file=config.yaml=./path/to/config.yaml -``` - - -To enable OIDC authentication and disable Upbound IAM when installing the Space, -reference the configuration and pass an empty value to the Upbound IAM issuer -parameter: - - -```sh -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "authentication.structuredConfig=" \ - --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url=" -``` - -## Configure RBAC - - -In this scenario, the external identity provider handles authentication, but -permissions for Spaces and ControlPlane APIs use standard RBAC objects. - -### Spaces APIs - -The Spaces APIs include: -```yaml -- apiGroups: - - spaces.upbound.io - resources: - - controlplanes - - sharedexternalsecrets - - sharedsecretstores - - backups - - backupschedules - - sharedbackups - - sharedbackupconfigs - - sharedbackupschedules -- apiGroups: - - observability.spaces.upbound.io - resources: - - sharedtelemetryconfigs -``` - -### ControlPlane APIs - - - -Crossplane specifies three [roles][crossplane-managed-clusterroles] for a -ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`, -and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API -group. - - -### Control access - -The `groups` claim in the `AuthenticationConfiguration` allows you to control -resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines -the role parameters and a `ClusterRoleBinding` subject. 
- -The example below allows `admin` permissions for all ControlPlanes to members of -the `ctp-admins` group: - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: allow-ctp-admin -rules: -- apiGroups: - - spaces.upbound.io - resources: - - controlplanes/k8s - verbs: - - admin -``` - -ctp-admins ClusterRoleBinding -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: allow-ctp-admin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: allow-ctp-admin -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: ctp-admins -``` - -[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration -[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles -[upbound-rbac]: /manuals/platform/rbac diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/query-api.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/query-api.md deleted file mode 100644 index c55fa4405..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/query-api.md +++ /dev/null @@ -1,315 +0,0 @@ ---- -title: Query API -sidebar_position: 40 -description: Use the `up` CLI to query objects and resources ---- - - - - -Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space. - - - - -## Using the Query API - - -The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. 
This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views. - -### Query within a single control plane - -Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane. - -To switch between control plane groups, use the [`up ctx` ][up-ctx] and change to your desired context with an interactive prompt or specify with your control plane path: - -```shell -up ctx /// -``` - -You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context. - -The `up alpha get` command can query resource types and aliases to return objects in your control plane. - -```shell -up alpha get managed -NAME READY SYNCED AGE -custom-account1-5bv5j-sa True True 15m -custom-cluster1-bq6dk-net True True 15m -custom-account1-5bv5j-subnet True True 15m -custom-cluster1-bq6dk-nodepool True True 15m -custom-cluster1-bq6dk-cluster True True 15m -custom-account1-5bv5j-net True True 15m -custom-cluster1-bq6dk-subnet True True 15m -custom-cluster1-bq6dk-sa True True 15m -``` - -The [`-A` flag][a-flag] queries for objects across all namespaces. - -```shell -up alpha get configmaps -A -NAMESPACE NAME AGE -crossplane-system uxp-versions-config 18m -crossplane-system universal-crossplane-config 18m -crossplane-system kube-root-ca.crt 18m -upbound-system kube-root-ca.crt 18m -kube-system kube-root-ca.crt 18m -kube-system coredns 18m -default kube-root-ca.crt 18m -kube-node-lease kube-root-ca.crt 18m -kube-public kube-root-ca.crt 18m -kube-system kube-apiserver-legacy-service-account-token-tracking 18m -kube-system extension-apiserver-authentication 18m -``` - -To query for [multiple resource types][multiple-resource-types], you can add the name or alias for the resource as a comma separated string. 
- -```shell -up alpha get providers,providerrevisions - -NAME HEALTHY REVISION IMAGE STATE DEP-FOUND DEP-INSTALLED AGE -providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431 True 1 xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1 Active 18m -NAME INSTALLED HEALTHY PACKAGE AGE -provider.pkg.crossplane.io/crossplane-contrib-provider-nop True True xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1 18m -``` - -### Query multiple control planes - -The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space - -The `up alpha query` command accepts resources and aliases to return objects across your group or Space. - -```shell -up alpha query crossplane - -NAME ESTABLISHED OFFERED AGE -compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co True True 20m -compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co True True 20m - - -NAME XR-KIND XR-APIVERSION AGE -composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co XAccountScaffold platform.acme.co/v1alpha1 20m -composition.apiextensions.crossplane.io/xnetworks.platform.acme.co XNetwork platform.acme.co/v1alpha1 20m - - -NAME REVISION XR-KIND XR-APIVERSION AGE -compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5 1 XAccountScaffold platform.acme.co/v1alpha1 20m -compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80 1 XNetwork platform.acme.co/v1alpha1 20m - -NAME READY SYNCED AGE -nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet True True 19m -nopresource.nop.crossplane.io/custom-account1-5bv5j-net True True 19m - -## Output truncated... 
- -``` - - -The [`--sort-by` flag][sort-by-flag] allows you to return information to your specifications. You can construct your sort order in a JSONPath expression string or integer. - - -```shell -up alpha query crossplane -A --sort-by="{.metadata.name}" - -CONTROLPLANE NAME AGE -default/test deploymentruntimeconfig.pkg.crossplane.io/default 10m - -CONTROLPLANE NAME AGE TYPE DEFAULT-SCOPE -default/test storeconfig.secrets.crossplane.io/default 10m Kubernetes crossplane-system -``` - -To query for multiple resource types, you can add the name or alias for the resource as a comma separated string. - -```shell -up alpha query namespaces,configmaps -A - -CONTROLPLANE NAME AGE -default/test namespace/upbound-system 15m -default/test namespace/crossplane-system 15m -default/test namespace/kube-system 16m -default/test namespace/default 16m - -CONTROLPLANE NAMESPACE NAME AGE -default/test crossplane-system configmap/uxp-versions-config 15m -default/test crossplane-system configmap/universal-crossplane-config 15m -default/test crossplane-system configmap/kube-root-ca.crt 15m -default/test upbound-system configmap/kube-root-ca.crt 15m -default/test kube-system configmap/coredns 16m -default/test default configmap/kube-root-ca.crt 16m - -## Output truncated... - -``` - -The Query API also allows you to return resource types with specific [label columns][label-columns]. 
- -```shell -up alpha query composite -A --label-columns=crossplane.io/claim-namespace - -CONTROLPLANE NAME SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -query-api-test/test xeks.argo.discover.upbound.io/test-k7xbk False xeks.argo.discover.upbound.io 51d default - -CONTROLPLANE NAME EXTERNALDNS SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -spaces-clusters/controlplane-query-api-test-spaces-playground xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 19d default -default/query-api-test xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 55d default - -## Output truncated... - -``` - -### Query API request format - -The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query. - -```shell -up alpha query composite -A -d - -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -metadata: - creationTimestamp: null -spec: - cursor: true - filter: - categories: - - composite - controlPlane: {} - limit: 500 - objects: - controlPlane: true - table: {} - page: {} -``` - -For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`. - -The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims. - - -```yaml -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -spec: - filter: - categories: - - claim - order: - - creationTimestamp: Asc - cursor: true - count: true - objects: - id: true - controlPlane: true - object: - kind: true - apiVersion: true - metadata: - name: true - uid: true - spec: - containers: - image: true -``` - - -The Query API is served by the Spaces API endpoint. 
You can use `up ctx` to -switch the kubectl context to the Spaces API ingress. After that, you can use -`kubectl create` and receive the `response` for your query parameters. - - -```shell -kubectl create -f spaces-query.yaml -o yaml -``` - -Your `response` should look similar to this example: - -```yaml {copy-lines="none"} -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -metadata: - creationTimestamp: "2024-08-08T14:41:46Z" - name: default -response: - count: 3 - cursor: - next: "" - page: 0 - pageSize: 100 - position: 0 - objects: - - controlPlane: - name: query-api-test - namespace: default - id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc - object: - apiVersion: spaces.platform.upbound.io/v1alpha1 - kind: Space - metadata: - name: space-awg-kine - resourceVersion: "803868" - uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc - spec: {} - - controlPlane: - name: test-1 - namespace: test - id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d - object: - apiVersion: argo.discover.upbound.io/v1alpha1 - kind: EKS - metadata: - name: test-1 - resourceVersion: "4270320" - uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d - spec: {} - - controlPlane: - name: controlplane-query-api-test-spaces-playground - namespace: spaces-clusters - id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159 - object: - apiVersion: spaces.platform.upbound.io/v1alpha1 - kind: Space - metadata: - name: spaces-cluster-0 - resourceVersion: "1408337" - uid: b5a6770f-1f85-4d09-8990-997c84bd4159 - spec: {} -``` - - -## Query API Explorer - - - -import CrdDocViewer from '@site/src/components/CrdViewer'; - -### Query - -The Query resource allows you to query objects in a single control plane. - - - -### GroupQuery - -The GroupQuery resource allows you to query objects across a group of control planes. - - - -### SpaceQuery - -The SpaceQuery resource allows you to query objects across all control planes in a space. 
- - - - - - -[documentation]: /self-hosted-spaces/howtos/query-api -[up-ctx]: /reference/cli-reference -[up-alpha-get-command]: /reference/cli-reference -[a-flag]: /reference/cli-reference -[multiple-resource-types]: /reference/cli-reference -[up-alpha-query-command]: /reference/cli-reference -[sort-by-flag]: /reference/cli-reference -[label-columns]: /reference/cli-reference -[debug-flag]: /reference/cli-reference -[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/scaling-resources.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/scaling-resources.md deleted file mode 100644 index aba74f0e5..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/scaling-resources.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Scaling vCluster and etcd Resources -weight: 950 -description: A guide for scaling vCluster and etcd resources in self-hosted Spaces -aliases: - - /all-spaces/self-hosted-spaces/scaling-resources - - /spaces/scaling-resources ---- - -In large workloads or control plane migration, you may performance impacting -resource constraints. This guide explains how to scale vCluster and `etcd` -resources for optimal performance in your self-hosted Space. - - -## Signs of resource constraints - -You may need to scale your vCluster or `etcd` resources if you observe: - -- API server timeout errors such as `http: Handler timeout` -- Error messages about `too many requests` and requests to `try again later` -- Operations like provider installation failing with errors like `cannot apply provider package secret` -- vCluster pods experiencing continuous restarts -- API performance degrades with high resource volume - - -## Scaling vCluster resources - - -The vCluster component handles Kubernetes API requests for your control planes. -Deployments with multiple control planes or providers may exceed default resource allocations. 
- -```yaml -# Default settings -controlPlanes.vcluster.resources.limits.cpu: "3000m" -controlPlanes.vcluster.resources.limits.memory: "3960Mi" -controlPlanes.vcluster.resources.requests.cpu: "170m" -controlPlanes.vcluster.resources.requests.memory: "1320Mi" -``` - -For larger workloads, like migrating from an existing control plane with several -providers, increase these resource limits in your Spaces `values.yaml` file. - -```yaml -controlPlanes: - vcluster: - resources: - limits: - cpu: "4000m" # Increase to 4 cores - memory: "6Gi" # Increase to 6GB memory - requests: - cpu: "500m" # Increase baseline CPU request - memory: "2Gi" # Increase baseline memory request -``` - -## Scaling `etcd` storage - -Kubernetes relies on `etcd` performance, which can lead to IOPS (input/output -operations per second) bottlenecks. Upbound allocates `50Gi` volumes for `etcd` -in cloud environments to ensure adequate IOPS performance. - -```yaml -# Default setting -controlPlanes.etcd.persistence.size: "5Gi" -``` - -For production environments or when migrating large control planes, increase -`etcd` volume size and specify an appropriate storage class: - -```yaml -controlPlanes: - etcd: - persistence: - size: "50Gi" # Recommended for production - storageClassName: "fast-ssd" # Use a high-performance storage class -``` - -### Storage class considerations - -For AWS: -- Use GP3 volumes with adequate IOPS -- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline) -- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS - -For GCP and Azure: -- Use SSD-based persistent disk types for optimal performance -- Consider premium storage options for high-throughput workloads - -## Scaling Crossplane resources - -Crossplane manages provider resources in your control planes. 
You may need to increase provider resources for larger deployments: - -```yaml -# Default settings -controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m" -controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi" -``` - - -For environments with many providers or managed resources: - - -```yaml -controlPlanes: - uxp: - resourcesCrossplane: - limits: - cpu: "1000m" # Add CPU limit - memory: "1Gi" # Add memory limit - requests: - cpu: "500m" # Increase CPU request - memory: "512Mi" # Increase memory request -``` - -## High availability configuration - -For production environments, enable High Availability mode to ensure resilience: - -```yaml -controlPlanes: - ha: - enabled: true -``` - -## Best practices for migration scenarios - -When migrating from existing control planes into a self-hosted Space: - -1. **Pre-scale resources**: Scale up resources before performing the migration -2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods` -3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes -4. 
**Consider storage performance**: `etcd` is sensitive to storage I/O performance - -## Helm values configuration - -Apply these settings through your Spaces Helm values file: - -```yaml -controlPlanes: - vcluster: - resources: - limits: - cpu: "4000m" - memory: "6Gi" - requests: - cpu: "500m" - memory: "2Gi" - etcd: - persistence: - size: "50Gi" - storageClassName: "gp3" # Use your cloud provider's fast storage class - uxp: - resourcesCrossplane: - limits: - cpu: "1000m" - memory: "1Gi" - requests: - cpu: "500m" - memory: "512Mi" - ha: - enabled: true # For production environments -``` - -Apply the configuration using Helm: - -```bash -helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \ - -f values.yaml \ - -n upbound-system -``` - -## Considerations - -- **Provider count**: Each provider adds resource overhead - consider using provider families to optimize resource usage -- **Managed resources**: The number of managed resources impacts CPU usage more than memory -- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage -- **Storage performance**: Storage performance is as important as capacity for etcd -- **Network latency**: Low-latency connections between components improve performance - - diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/secrets-management.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/secrets-management.md deleted file mode 100644 index 82f43eae1..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/secrets-management.md +++ /dev/null @@ -1,727 +0,0 @@ ---- -title: Secrets Management -sidebar_position: 20 -description: A guide for how to configure synchronizing external secrets into control - planes in a Space. 
---- - -:::important -This feature is in **Preview** -::: - -Upbound's _Shared Secrets_ is a built in secrets management feature that -provides an integrated way to manage secrets across your platform. It allows you -to store sensitive data like passwords and certificates for your managed control -planes as secrets in an external secret store. - -This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. - - -## Benefits - -The Shared Secrets feature allows you to: - -* Access secrets from a variety of external secret stores without operation overhead -* Configure synchronization for multiple control planes in a group -* Store and manage all your secrets centrally -* Use Shared Secrets across all Upbound environments(Cloud and Disconnected Spaces) -* Synchronize secrets across groups of control planes while maintaining clear security boundaries -* Manage secrets at scale programmatically while ensuring proper isolation and access control - -## Understanding the Architecture - -The Shared Secrets feature uses a hierarchical approach to centrally manage -secrets and effectively control their distribution. - -![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png) - -1. The flow begins at the group level, where you define your secret sources and distribution rules -2. These rules automatically create corresponding resources in your control planes -3. In each control plane, specific namespaces receive the secrets -4. Changes at the group level automatically propagate through this chain - -## Enable shared Secrets - -To enable this feature in a self-hosted Space, set -`features.alpha.sharedSecrets.enabled=true` when installing the Space: - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... 
- --set "features.alpha.sharedSecrets.enabled=true" \ -``` - - -## Component configuration - -Upbound Shared Secrets consists of two components: - -1. **SharedSecretStore**: Defines connections to external secret providers -2. **SharedExternalSecret**: Specifies which secrets to synchronize and where - - -### Connect to an External Vault - - -The `SharedSecretStore` component is the connection point to your external -secret vaults. It provisions ClusterSecretStore resources into control planes -within the group. - - -#### AWS Secrets Manager - - - -In this example, you'll create a `SharedSecretStore` to connect to AWS Secrets Manager in `us-west-2`. Then apply access to all control planes labeled with `environment: production`, and make these secrets available in the `default` and `crossplane-system` namespaces. - - -You can configure access to AWS Secrets Manager using static credentials or -workload identity. - -:::important -While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces. -::: - -##### Static credentials - -1. Use the AWS CLI to create access credentials. - - -2. Create your access credentials. -```ini -# Create a text file with AWS credentials -cat > aws-credentials.txt << EOF -[default] -aws_access_key_id = -aws_secret_access_key = -EOF -``` - -3. Next,store the access credentials in a secret in the namespace you want to have access to the `SharedSecretStore`. -```shell -kubectl create secret \ - generic aws-credentials \ - -n default \ - --from-file=creds=./aws-credentials.txt -``` - -4. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`. 
- Paste the following configuration: -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: aws-secrets -spec: - # Define which control planes should receive this configuration - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production - - # Define which namespaces within those control planes can access secrets - namespaceSelector: - names: - - default - - crossplane-system - - # Configure the connection to AWS Secrets Manager - provider: - aws: - service: SecretsManager - region: us-west-2 - auth: - secretRef: - accessKeyIDSecretRef: - name: aws-credentials - key: access-key-id - secretAccessKeySecretRef: - name: aws-credentials - key: secret-access-key -``` - - - -##### Workload Identity with IRSA - - - -You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your -organizations needs: - -1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster. -2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC - provider URL. -3. Determine the Spaces-generated `controlPlaneID` of your control plane: -```shell -kubectl get controlplane -o jsonpath='{.status.controlPlaneID}' -``` - -4. Create an IAM trust policy in your AWS account to match the control plane. -```yaml -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam:::oidc-provider/" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - ":aud": "sts.amazonaws.com", - ":sub": [ -"system:serviceaccount:mxp--system:external-secrets-controller"] - } - } - } - ] -} -``` - -5. Update your Spaces deployment to annotate the SharedSecrets service account - with the role ARN. -```shell -up space upgrade ... \ - --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="" -``` - -6. 
Create a SharedSecretStore and reference the SharedSecrets service account: -```ini {copy-lines="all"} -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: aws-sm - namespace: default -spec: - provider: - aws: - service: SecretsManager - region: - auth: - jwt: - serviceAccountRef: - name: external-secrets-controller - controlPlaneSelector: - names: - - - namespaceSelector: - names: - - default -``` - -When you create a `SharedSecretStore` the underlying mechanism: - -1. Applies at the group level -2. Determines which control planes should receive this configuration by the `controlPlaneSelector` -3. Automatically creates a ClusterSecretStore inside each identified control plane -4. Maintains a connection in each control plane with the ClusterSecretStore - credentials and configuration from the parent SharedSecretStore - -Upbound automatically generates a ClusterSecretStore in each matching control -plane when you create a SharedSecretStore. - -```yaml {copy-lines="none"} -# Automatically created in each matching control plane -apiVersion: external-secrets.io/v1beta1 -kind: ClusterSecretStore -metadata: - name: aws-secrets # Name matches the parent SharedSecretStore -spec: - provider: - upboundspaces: - storeRef: - name: aws-secret -``` - -When you create the SharedSecretStore controller, it replaces the provider with -a special provider called `upboundspaces`. This provider references the -SharedSecretStore object in the Spaces API. This avoids copying the actual cloud -credentials from Spaces to each control plane. - -This workflow allows you to configure the store connection only once at the -group level and automatically propagates to each control plane. Individual control -planes can use the store without exposure to the group-level configuration and -updates all child ClusterSecretStores when updated. 
- - -#### Azure Key Vault - - -:::important -While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces. -::: - -##### Static credentials - -1. Use the Azure CLI to create a service principal and authentication file. -2. Create a service principal and save credentials in a file: -```json -{ - "appId": "myAppId", - "displayName": "myServicePrincipalName", - "password": "myServicePrincipalPassword", - "tenant": "myTentantId" -} -``` - -3. Store the credentials as a Kubernetes secret: -```shell -kubectl create secret \ - generic azure-secret-sp \ - -n default \ - --from-file=creds=./azure-credentials.json -``` - -4. Create a SharedSecretStore referencing these credentials: -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: azure-kv -spec: - provider: - azurekv: - tenantId: "" - vaultUrl: "" - authSecretRef: - clientId: - name: azure-secret-sp - key: ClientID - clientSecret: - name: azure-secret-sp - key: ClientSecret - controlPlaneSelector: - names: - - - namespaceSelector: - names: - - default -``` - -##### Workload Identity - - -You can also use Entra Workload Identity Federation to access Azure Key Vault -without needing to manage secrets. - -To use Entra Workload ID with AKS: - - -1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster]. -2. Retrieve the OIDC issuer URL of the AKS cluster: -```ini -az aks show --name "" \ - --resource-group "" \ - --query "oidcIssuerProfile.issuerUrl" \ - --output tsv -``` - -3. Use the Azure CLI to make a managed identity: -```ini -az identity create \ - --name "" \ - --resource-group "" \ - --location "" \ - --subscription "" -``` - -4. Look up the managed identity's client ID: -```ini -az identity show \ - --resource-group "" \ - --name "" \ - --query 'clientId' \ - --output tsv -``` - -5. 
Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step: -```ini -up space upgrade ... \ - --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="" \ - --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`. -```ini -kubectl get controlplane -o jsonpath='{.status.controlPlaneID}' -``` - -7. Create a federated identity credential. -```ini -FEDERATED_IDENTITY_CREDENTIAL_NAME= -USER_ASSIGNED_IDENTITY_NAME= -RESOURCE_GROUP= -AKS_OIDC_ISSUER= -CONTROLPLANE_ID= -az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange -``` - -8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier. This step gives the managed identity permission to read secrets from the key vault: -```ini -az role assignment create \ - --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \ - --role "Key Vault Secrets User" \ - --scope "${KEYVAULT_RESOURCE_ID}" \ - --assignee-principal-type ServicePrincipal -``` - -:::important -You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment. -::: - -8. Create a `SharedSecretStore`. Replace `vaultURL` with the URL of your Azure Key Vault instance. 
Replace `identityId` with the client ID of the managed identity created earlier: -```yaml {copy-lines="all"} -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: azure-kv -spec: - provider: - azurekv: - authType: WorkloadIdentity - vaultUrl: "" - controlPlaneSelector: - names: - - - namespaceSelector: - names: - - default -``` - - - - -#### Google Cloud Secret Manager - - - -You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information. - -:::important -While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces. -::: - -##### Static credentials - -1. Use the [GCP CLI][gcp-cli] to create access credentials. -2. Save the output in a file called `gcp-credentials.json`. -3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`. - ```shell {label="kube-create-secret",copy-lines="all"} - kubectl create secret \ - generic gcpsm-secret \ - -n default \ - --from-file=creds=./gcp-credentials.json - ``` - -4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID: -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: gcp-sm -spec: - provider: - gcpsm: - auth: - secretRef: - secretAccessKeySecretRef: - name: gcpsm-secret - key: creds - projectID: - controlPlaneSelector: - names: - - - namespaceSelector: - names: - - default -``` - -:::tip -The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes. 
-::: - - -##### Workload identity with Service Accounts to IAM Roles - - -To configure, grant the `roles/iam.workloadIdentityUser` role to the Kubernetes -service account in the control plane namespace to impersonate the IAM service -account. - -1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster. -2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`. -```ini -kubectl get controlplane -o jsonpath='{.status.controlPlaneID}' -``` - -3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]: -```ini -gcloud iam service-accounts create \ - --project= -``` - -4. Grant the IAM service account the role to access GCP Secret Manager: -```ini -SA_NAME= -IAM_SA_PROJECT_ID= -gcloud projects add-iam-policy-binding IAM_SA_PROJECT_ID \ - --member "serviceAccount:SA_NAME@IAM_SA_PROJECT_ID.iam.gserviceaccount.com" \ - --role roles/secretmanager.secretAccessor -``` - -5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role. -```ini -PROJECT_ID= -PROJECT_NUMBER= -CONTROLPLANE_ID= -gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \ - --role "roles/iam.workloadIdentityUser" \ - --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller -``` - -6. Update your Spaces deployment to annotate the SharedSecrets service account with GCP IAM service account's identifier: -```ini -up space upgrade ... 
\ - --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="" -``` - -7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID: -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: gcp-sm -spec: - provider: - gcpsm: - projectID: - controlPlaneSelector: - names: - - - namespaceSelector: - names: - - default -``` - -:::tip -The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes. -::: - -### Manage your secret distribution - -After you create your SharedSecretStore, you can define which secrets to -distribute using SharedExternalSecret: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedExternalSecret -metadata: - name: database-credentials - namespace: default -spec: - # Select the same control planes as your SharedSecretStore - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production - - externalSecretSpec: - refreshInterval: 1h - secretStoreRef: - name: aws-secrets # References the SharedSecretStore name - kind: ClusterSecretStore - target: - name: db-credentials - data: - - secretKey: username - remoteRef: - key: prod/database/credentials - property: username - - secretKey: password - remoteRef: - key: prod/database/credentials - property: password -``` - -This configuration: - -* Pulls database credentials from your external secret provider -* Creates secrets in all production control planes -* Refreshes the secrets every hour -* Creates a secret called `db-credentials` in each control plane - -When you create a SharedExternalSecret at the group level, Upbound's system -creates a template for the corresponding ClusterExternalSecrets in each selected -control plane. 
- -The example below simulates the ClusterExternalSecret that Upbound creates: - -```yaml -# Inside each matching control plane: -apiVersion: external-secrets.io/v1beta1 -kind: ClusterExternalSecret -metadata: - name: database-credentials -spec: - refreshInterval: 1h - secretStoreRef: - name: aws-secrets - kind: ClusterSecretStore - data: - - secretKey: username - remoteRef: - key: prod/database/credentials - property: username -``` - -The hierarchy in this configuration is: - -1. SharedExternalSecret (group level) defines what secrets to distribute -2. ClusterExternalSecret (control plane level) manages the distribution within - each control plane - -3. Kubernetes Secrets (namespace level) are created in specified namespaces - - -#### Control plane selection - -To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match. - -This example matches all control planes in the group that have `environment: production` as a label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: my-secret-store -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -You can use the more complex `matchExpressions` to match labels based on an expression. 
This example matches control planes that have label `environment: production` or `environment: staging`: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: my-secret-store -spec: - controlPlaneSelector: - labelSelectors: - - matchExpressions: - - { key: environment, operator: In, values: [production,staging] } -``` - -You can also specify the names of control planes directly: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: my-secret-store -spec: - controlPlaneSelector: - names: - - controlplane-dev - - controlplane-staging - - controlplane-prod -``` - - -#### Namespace selection - -To configure which namespaces **within each matched control plane** to project the secret store into, use `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A control plane matches if any of the label selectors match. - -**For all control planes matched by** `spec.controlPlaneSelector`, This example matches all namespaces in each selected control plane that have `team: team1` as a label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: my-secret-store -spec: - namespaceSelector: - labelSelectors: - - matchLabels: - team: team1 -``` - -You can use the more complex `matchExpressions` to match labels based on an expression. 
This example matches namespaces that have label `team: team1` or `team: team2`: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: my-secret-store -spec: - namespaceSelector: - labelSelectors: - - matchExpressions: - - { key: team, operator: In, values: [team1,team2] } -``` - -You can also specify the names of namespaces directly: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: my-secret-store -spec: - namespaceSelector: - names: - - team1-namespace - - team2-namespace -``` - -## Configure secrets directly in a control plane - - -The above explains using group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane as you would in standalone Crossplane or Kubernetes. - - -See the [ESO documentation][eso-documentation] for a full guide on using the API types. - -## Best practices - -When you configure secrets management in your Upbound environment, keep the -following best practices in mind: - -**Use consistent labeling schemes** across your control planes for predictable -and manageable secret distribution. - -**Organize your secrets** in your external provider using a hierarchical -structure that mirrors your control plane organization. - -**Set appropriate refresh intervals** based on your security requires and the -nature of the secrets. - -**Use namespace selection sparingly** to limit secret distribution to only the -namespaces that need them. - -**Use separate tokens for each environment.** Keep them in distinct -SharedSecretStores. Users could bypass SharedExternalSecret selectors by -creating ClusterExternalSecrets directly in control planes. This grants access to all -secrets available to that token. - -**Document your secret management architecture**, including which control planes -should receive which secrets. 
- -[control-plane-selection]: #control-plane-selection -[namespace-selection]: #namespace-selection -[control-plane-selection-1]: #control-plane-selection -[namespace-selection-2]: #namespace-selection - -[external-secrets-operator-eso]: https://external-secrets.io -[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster -[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/ -[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys -[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools -[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam -[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding -[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/self-hosted-spaces-deployment.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/self-hosted-spaces-deployment.md deleted file mode 100644 index a783bba17..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/self-hosted-spaces-deployment.md +++ /dev/null @@ -1,488 +0,0 @@ ---- -title: Deployment Workflow -sidebar_position: 3 -description: A quickstart guide for Upbound Spaces -tier: "business" ---- -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - -This guide deploys a self-hosted Upbound cluster in AWS. - - - - - -This guide deploys a self-hosted Upbound cluster in Azure. - - - - - -This guide deploys a self-hosted Upbound cluster in GCP. - - - -Disconnected Spaces allows you to host control planes in your preferred environment. 
- -## Prerequisites - -To get started deploying your own Disconnected Space, you need: - -- An Upbound organization account string, provided by your Upbound account representative -- A `token.json` license, provided by your Upbound account representative - - - -- An AWS account and the AWS CLI - - - - - -- An Azure account and the Azure CLI - - - - - -- An GCP account and the GCP CLI - - - - -:::important -Disconnected Spaces are a business critical feature of Upbound and requires a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces. -::: - - -## Provision the hosting environment - -### Create a cluster - - - -Configure the name and target region you want the EKS cluster deployed to. - -```ini -export SPACES_CLUSTER_NAME=upbound-space-quickstart -export SPACES_REGION=us-east-1 -``` - -Provision a 3-node cluster using eksctl. - -```bash -cat < - - - -Configure the name and target region you want the AKS cluster deployed to. - -```ini -export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart -export SPACES_CLUSTER_NAME=upbound-space-quickstart -export SPACES_LOCATION=westus -``` - -Provision a new Azure resource group. - -```bash -az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION} -``` - -Provision a 3-node cluster. - -```bash -az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \ - --enable-managed-identity \ - --node-count 3 \ - --node-vm-size Standard_D4s_v4 \ - --enable-addons monitoring \ - --enable-msi-auth-for-monitoring \ - --generate-ssh-keys \ - --network-plugin kubenet \ - --network-policy calico -``` - -Get the kubeconfig of your AKS cluster. - -```bash -az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME} -``` - - - - - -Configure the name and target region you want the GKE cluster deployed to. 
- -```ini -export SPACES_PROJECT_NAME=upbound-spaces-project -export SPACES_CLUSTER_NAME=upbound-spaces-quickstart -export SPACES_LOCATION=us-west1-a -``` - -Create a new project and set it as the current project. - -```bash -gcloud projects create ${SPACES_PROJECT_NAME} -gcloud config set project ${SPACES_PROJECT_NAME} -``` - -Provision a 3-node cluster. - -```bash -gcloud container clusters create ${SPACES_CLUSTER_NAME} \ - --enable-network-policy \ - --num-nodes=3 \ - --zone=${SPACES_LOCATION} \ - --machine-type=e2-standard-4 -``` - -Get the kubeconfig of your GKE cluster. - -```bash -gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION} -``` - - - -## Configure the pre-install - -### Set your Upbound organization account details - -Set your Upbound organization account string as an environment variable for use in future steps - -```ini -export UPBOUND_ACCOUNT= -``` - -### Set up pre-install configurations - -Export the path of the license token JSON file provided by your Upbound account representative. - -```ini {copy-lines="2"} -# Change the path to where you saved the token. -export SPACES_TOKEN_PATH="/path/to/token.json" -``` - -Set the version of Spaces software you want to install. - - -```ini -export SPACES_VERSION="1.16.0" -``` - -Set the router host and cluster type. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the load balancer or ingress to route requests. - -```ini -export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" -``` - -:::important -Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own. -::: - - -## Install the Spaces software - - -### Install cert-manager - -Install cert-manager. Replace `` with the [latest release](https://github.com/cert-manager/cert-manager/releases). 
- -```bash -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download//cert-manager.yaml -kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s -``` - - - -### Install AWS Load Balancer Controller - -The AWS Load Balancer Controller provisions a Network Load Balancer when you expose the spaces-router Service with the annotations in the next step. - -```bash -helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \ - --repo https://aws.github.io/eks-charts \ - --set clusterName=${SPACES_CLUSTER_NAME} \ - --set serviceAccount.create=false \ - --set serviceAccount.name=aws-load-balancer-controller \ - --wait -``` - - - -### Expose Spaces with LoadBalancer (recommended) - -This guide exposes Spaces using a LoadBalancer Service on the spaces-router. You don't need an ingress or gateway. - -:::important -Use a Network Load Balancer (L4), not an Application Load Balancer (L7). Spaces uses long-lived connections for watch traffic that L7 load balancers may timeout. -::: - -Create a values file using `SPACES_ROUTER_HOST` (or use the same `--set` flags in the Helm install below). 
Run the command for your cloud: - - - -```bash -cat < values.yaml -externalTLS: - host: ${SPACES_ROUTER_HOST} - -router: - proxy: - service: - type: LoadBalancer - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: external - service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip -EOF -``` - - - - - -```bash -cat < values.yaml -externalTLS: - host: ${SPACES_ROUTER_HOST} - -router: - proxy: - service: - type: LoadBalancer - # Azure uses L4 by default; add annotations if needed for your setup - annotations: {} -EOF -``` - - - - - -```bash -cat < values.yaml -externalTLS: - host: ${SPACES_ROUTER_HOST} - -router: - proxy: - service: - type: LoadBalancer - annotations: - cloud.google.com/l4-rbs: enabled -EOF -``` - - - -:::tip -To use Gateway API or Ingress instead of LoadBalancer, see [Exposing Spaces externally][expose]. -::: - -### Install Upbound Spaces software - -Create an image pull secret so that the cluster can pull Upbound Spaces images. - -```bash -kubectl create ns upbound-system -kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ - --docker-server=https://xpkg.upbound.io \ - --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ - --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" -``` - -Log in with Helm to be able to pull chart images for the installation commands. - -```bash -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin -``` - -Install the Spaces software. Use the `values.yaml` file you created in the previous step (with `externalTLS.host` and `router.proxy.service`). 
- -```bash -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - -f values.yaml \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "authentication.hubIdentities=true" \ - --set "authorization.hubRBAC=true" \ - --wait -``` - -### Create a DNS record - -:::important -Create a DNS record for the spaces-router load balancer before you create your first control plane. -::: - -Get the load balancer address for the spaces-router Service: - - - -```bash -kubectl get svc -n upbound-system spaces-router \ - -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' -``` - - - - - -```bash -kubectl get svc -n upbound-system spaces-router \ - -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -``` - - - - - -```bash -kubectl get svc -n upbound-system spaces-router \ - -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -``` - - - -If the preceding command doesn't return a load balancer address then your -provider may not have allocated it yet. Once it's available, add a DNS record -for the `ROUTER_HOST` to point to the given load balancer address. If it's an -IPv4 address, add an A record. If it's a domain name, add a CNAME record. - -## Configure the up CLI - -With your kubeconfig pointed at the Kubernetes cluster where you installed -Upbound Spaces, create a new profile in the `up` CLI. 
This profile interacts -with your Space: - -```bash -up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT} -``` - -Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well: - -```bash -up login -``` - - -## Connect to your Space - - -Use `up ctx` to create a kubeconfig context pointed at your new Space: - -```bash -up ctx disconnected/$(kubectl config current-context) -``` - -## Create your first control plane - -You can now create a control plane with the `up` CLI: - -```bash -up ctp create ctp1 -``` - -You can also create a control plane with kubectl: - -```yaml -cat < -Connect to your control plane with the up ctx command. With your kubeconfig -still pointed at the Kubernetes cluster where you installed the Upbound Space, -run the following: - - - -```bash -up ctx ./default/ctp1 -``` - -This command updates your current kubectl context. You're now connected to your control plane directly. Confirm this is the case by trying to list the CRDs in your control plane: - -```bash -kubectl get crds -``` - -To disconnect from your control plane and switch back to your previous context: - -```bash -up ctx - -``` - -:::tip -Learn how to use the up CLI to navigate around Upbound by reading the [up ctx command reference][up-ctx-command-reference]. 
-::: - -[up-ctx-command-reference]: /reference/cli-reference -[contact-upbound]: https://www.upbound.io/contact-us -[expose]: /self-hosted-spaces/howtos/ingress/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/simulations.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/simulations.md deleted file mode 100644 index 38a19d7d4..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/simulations.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Simulate changes to your Control Plane Projects -sidebar_position: 100 -description: Use the Up CLI to mock operations before deploying to your environments. ---- - -:::important -The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound]. -::: - -Control plane simulations allow you to preview changes to your resources before -applying them to your control planes. Like a plan or dry-run operation, -simulations expose the impact of updates to compositions or claims without -changing your actual resources. - -A control plane simulation creates a temporary copy of your control plane and -returns a preview of the desired changes. The simulation change plan helps you -reduce the risk of unexpected behavior based on your changes. - -## Simulation benefits - -Control planes are dynamic systems that automatically reconcile resources to -match your desired state. Simulations provide visibility into this -reconciliation process by showing: - - -* New resources to create -* Existing resources to change -* Existing resources to delete -* How configuration changes propagate through the system - -These insights are crucial when planning complex changes or upgrading Crossplane -packages. - -## Requirements - -Simulations are available to select customers on Upbound Cloud with Team -Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1]. 
- -## How to simulate your control planes - -Before you start a simulation, build your project and use the `up -project run` command to run your control plane. - -Use the `up project simulate` command with your control plane name to start the -simulation: - -```ini {copy-lines="all"} -up project simulate --complete-after=60s --terminate-on-finish -``` - -The `complete-after` flag determines how long to run the simulation before it completes and calculates the results. Depending on the change, a simulation may not complete within your defined interval leaving unaffected resources as `unchanged`. - -The `terminate-on-finish` flag terminates the simulation after the time -you set - deleting the control plane that ran the simulation. - -At the end of your simulation, your CLI returns: -* A summary of the resources created, modified, or deleted -* Diffs for each resource affected - -## View your simulation in the Upbound Console -You can also view your simulation results in the Upbound Console: - -1. Navigate to your base control plane in the Upbound Console -2. Select the "Simulations" tab in the menu -3. Select a simulation object for a change list of all - resources affected. - -The Console provides visual indications of changes: - -- Created Resources: Marked with green -- Modified Resources: Marked with yellow -- Deleted Resources: Marked with red -- Unchanged Resources: Displayed in gray - -![Upbound Console Simulation](/img/simulations.png) - -## Considerations - -Simulations is a **private preview** feature. - -Be aware of the following limitations: - -- Simulations can't predict the exact behavior of external systems due to the - complexity and non-deterministic reconciliation pattern in Crossplane. - -- The only completion criteria for a simulation is time. Your simulation may not - receive a conclusive result within that interval. Upbound recommends the - default `60s` value. - -- Providers don't run in simulations. 
Simulations can't compose resources that - rely on the status of Managed Resources. - - -The Upbound team is working to improve these limitations. Your feedback is always appreciated. - -## Next steps - -For more information, follow the [tutorial][tutorial] on Simulations. - - -[tutorial]: /manuals/cli/howtos/simulations -[reach-out-to-upbound]: https://www.upbound.io/contact-us -[reach-out-to-upbound-1]: https://www.upbound.io/contact-us diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/space-observability.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/space-observability.md deleted file mode 100644 index 38e582981..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/space-observability.md +++ /dev/null @@ -1,454 +0,0 @@ ---- -title: Configure Space-level observability -sidebar_position: 30 -description: Configure Space-level observability ---- - -:::important -This feature is GA since `v1.14.0`, requires Spaces `v1.6.0`, and is off by default. To enable, set `observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing Spaces: - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "observability.enabled=true" \ -``` -::: - - -The observability feature collects telemetry data from user-facing control -plane workloads like: - -* Crossplane -* Providers -* Functions - -Self-hosted Spaces users can add control plane system workloads such as the -`api-server`, `etcd` by setting the -`observability.collectors.includeSystemTelemetry` Helm flag to true. - -### Sensitive data - -To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use -Kubernetes secrets to store the sensitive data and reference the secret in the -`SharedTelemetryConfig` resource. - -Create the secret in the same namespace/group as the `SharedTelemetryConfig` -resource. 
The example below uses `kubectl create secret` to create a new secret: - -```bash -kubectl create secret generic sensitive -n \ - --from-literal=apiKey='YOUR_API_KEY' -``` - -Next, reference the secret in the `SharedTelemetryConfig` resource: - -```yaml -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: newrelic -spec: - configPatchSecretRefs: - - name: sensitive - key: apiKey - path: exporters.otlphttp.headers.api-key - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - otlphttp: - endpoint: https://otlp.nr-data.net - headers: - api-key: dummy # This value is replaced by the secret value, can be omitted - exportPipeline: - metrics: [otlphttp] - traces: [otlphttp] - logs: [otlphttp] -``` - -The `configPatchSecretRefs` field in the `spec` specifies the secret `name`, -`key`, and `path` values to inject the secret value in the -`SharedTelemetryConfig` resource. - - -This guide explains how to configure Space-level observability. This feature is -only applicable to self-hosted Space administrators. This lets Space -administrators observe the cluster infrastructure where the Space software gets -installed. - - -When you enable observability in a Space, Upbound deploys a single -[OpenTelemetry Collector][opentelemetry-collector] to collect and export metrics, -logs, and traces to your configured observability backends. - - -## Prerequisites - -This feature requires the [OpenTelemetry Operator][opentelemetry-operator] on -the Space cluster. - -Note: If running Spaces v1.16 or later, use OpenTelemetry Operator v0.139.0 or -later due to breaking changes in the OpenTelemetry Operator. - -## Configuration - -To configure how Upbound exports telemetry, review the `spacesCollector` value in -your Space installation Helm chart. Supported exporters are `otlphttp`, `datadog`, -`splunk_hec`, and `debug`. Replace the exporter name and configuration options -based on your backend. 
- - -Below is an example using `otlphttp`. - - - -```yaml -observability: - spacesCollector: - env: - - name: API_KEY - valueFrom: - secretKeyRef: - name: my-secret - key: api-key - config: - exporters: - otlphttp: - endpoint: "" - headers: - api-key: ${env:API_KEY} - exportPipeline: - logs: - - otlphttp - metrics: - - otlphttp - traces: - - otlphttp -``` - - - -For exporter-specific configuration options, see the OpenTelemetry documentation -for [`otlphttp`][otlphttp-exporter], [`datadog`][datadog-exporter], and -[`splunk_hec`][splunk-exporter]. - - -You can export metrics, logs, and traces from your Crossplane installation, Spaces -infrastructure (controller, API, router, etc.), provider-helm, and -provider-kubernetes. - -### Router metrics - -The Spaces router component uses Envoy as a reverse proxy and exposes detailed -metrics about request handling, circuit breakers, and connection pooling. -Upbound collects these metrics in your Space after you enable Space-level -observability. - -Envoy metrics in Upbound include: - -- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services -- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels -- **Downstream listener metrics** - Client connections and requests received -- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency - -For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref]. - -### Distributed tracing - -Spaces generates distributed traces through OpenTelemetry integration, -providing end-to-end visibility into request flow across the system. Use these -traces to debug latency issues, understand request paths, and correlate errors -across services. 
 - -The router uses: - -- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC -- **Service name**: `spaces-router` -- **Transport**: TLS-encrypted connection to telemetry collector - -#### Trace configuration - -Enable tracing and configure the sampling rate with the following Helm values: - -```yaml -observability: - enabled: true - tracing: - enabled: true - sampling: - rate: 0.1 # Sample 10% of new traces (0.0-1.0) -``` - -The sampling behavior depends on whether a parent trace context exists: - -- **With parent context**: If a `traceparent` header is present, the parent's - sampling decision is respected, enabling proper distributed tracing across services. -- **Root spans**: For new traces without a parent, Envoy samples based on - `x-request-id` hashing. The default sampling rate is 10%. - -#### TLS configuration for external collectors - -To send traces to an external OTLP collector, configure the endpoint and TLS settings: - -```yaml -observability: - enabled: true - tracing: - enabled: true - endpoint: "otlp-gateway.example.com" - port: 443 - tls: - caBundleSecretRef: "custom-ca-secret" -``` - -If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced -Kubernetes secret. The secret must contain a key named `ca.crt` with the -PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the -in-cluster collector. - -#### Custom trace tags - -The router adds custom tags to every span to enable filtering and grouping by -control plane: - -| Tag | Source | Description | -|-----|--------|-------------| -| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID | -| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname | -| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier | - -These tags enable queries like "show all slow requests to control plane X" or -"find errors for control planes in host cluster Y." 
- -#### Example trace - -The following example shows the attributes from a successful GET request: - -```text -Span: ingress -├─ Service: spaces-router -├─ Duration: 8.025ms -├─ Attributes: -│ ├─ http.method: GET -│ ├─ http.status_code: 200 -│ ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster -│ ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa -│ ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system -│ └─ response_size: 1827 -``` -The router uses: - -- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC -- **Service name**: `spaces-router` -- **Transport**: TLS-encrypted connection to telemetry collector - -#### Trace configuration - -Enable tracing and configure the sampling rate with the following Helm values: - -```yaml -observability: - enabled: true - tracing: - enabled: true - sampling: - rate: 0.1 # Sample 10% of new traces (0.0-1.0) -``` - -The sampling behavior depends on whether a parent trace context exists: - -- **With parent context**: If a `traceparent` header is present, the parent's - sampling decision is respected, enabling proper distributed tracing across services. -- **Root spans**: For new traces without a parent, Envoy samples based on - `x-request-id` hashing. The default sampling rate is 10%. - -#### TLS configuration for external collectors - -To send traces to an external OTLP collector, configure the endpoint and TLS settings: - -```yaml -observability: - enabled: true - tracing: - enabled: true - endpoint: "otlp-gateway.example.com" - port: 443 - tls: - caBundleSecretRef: "custom-ca-secret" -``` - -If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced -Kubernetes secret. The secret must contain a key named `ca.crt` with the -PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the -in-cluster collector. 
- -#### Custom trace tags - -The router adds custom tags to every span to enable filtering and grouping by -control plane: - -| Tag | Source | Description | -|-----|--------|-------------| -| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID | -| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname | -| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier | - -These tags enable queries like "show all slow requests to control plane X" or -"find errors for control planes in host cluster Y." - -#### Example trace - -The following example shows the attributes from a successful GET request: - -```text -Span: ingress -├─ Service: spaces-router -├─ Duration: 8.025ms -├─ Attributes: -│ ├─ http.method: GET -│ ├─ http.status_code: 200 -│ ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster -│ ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa -│ ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system -│ └─ response_size: 1827 -``` -For detailed tracing configuration, custom tags, and example traces for each -component, see the [distributed tracing documentation](tracing/overview.md). - -## Available metrics - -Space-level observability collects metrics from multiple infrastructure components: - -### Infrastructure component metrics - -- Crossplane controller metrics -- Spaces controller, API, and router metrics -- Provider metrics (provider-helm, provider-kubernetes) - -### Router metrics - -The router component exposes Envoy proxy metrics for monitoring traffic flow and -service health. 
Key metric categories include: - -- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency) -- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity -- `envoy_listener_downstream_*` - Client connection and request metrics -- `envoy_http_downstream_*` - HTTP request processing metrics - -Example query to monitor total request rate: - -```promql -sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m])) -``` - -Example query for P95 latency: - -```promql -histogram_quantile( - 0.95, - sum by (le) ( - rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m]) - ) -) -``` - -For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref]. - - -## OpenTelemetryCollector image - - -Control plane (`SharedTelemetry`) and Space observability deploy the same custom -OpenTelemetry Collector image. The OpenTelemetry Collector image supports -`otlphttp`, `datadog`, `splunk_hec`, and `debug` exporters. - -For more information on observability configuration, review the [Helm chart reference][helm-chart-reference]. - -## Observability in control planes - -Read the [observability documentation][observability-documentation] to learn -about the features Upbound offers for collecting telemetry from control planes. - - -## Router metrics reference {#router-ref} - -To avoid overwhelming observability tools with hundreds of Envoy metrics, an -allow-list filters metrics to only the following metric families. - -### Upstream cluster metrics - -Metrics tracking requests sent from Envoy to configured upstream clusters. -Individual control planes, spaces-api, and other services are each considered -an upstream cluster. Use these metrics to monitor service health, identify -upstream errors, and measure backend latency. 
- -| Metric | Description | -|--------|-------------| -| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` | -| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream | -| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts | -| `envoy_cluster_upstream_rq_total` | Total upstream requests | -| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) | -| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations | -| `envoy_cluster_upstream_rq_time_count` | Count of requests | - -### Circuit breaker metrics - - - -Metrics tracking circuit breaker state and remaining capacity. Circuit breakers -prevent cascading failures by limiting connections and concurrent requests to -unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and -`HIGH` for API requests. - - -| Name | Description | -|--------|-------------| -| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) | -| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) | - -### Downstream listener metrics - -Metrics tracking requests received from clients such as kubectl 
and API consumers. -Use these metrics to monitor client connection patterns, total request volume, -and responses sent to external users. - -| Name | Description | -|--------|-------------| -| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients | -| `envoy_listener_downstream_rq_total` | Total requests received from clients | -| `envoy_listener_downstream_cx_total` | Total connections from clients | -| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) | - - - -### HTTP connection manager metrics - - -Metrics from Envoy's HTTP connection manager tracking end-to-end request -processing. These metrics provide a comprehensive view of the HTTP request -lifecycle including status codes and client-perceived latency. - -| Name | Description | -|--------|-------------| -| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) | -| `envoy_http_downstream_rq_total` | Total HTTP requests received | -| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram | -| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations | -| `envoy_http_downstream_rq_time_count` | Count of downstream requests | - -[router-ref]: #router-ref -[observability-documentation]: /self-hosted-spaces/howtos/observability -[opentelemetry-collector]: https://opentelemetry.io/docs/collector/ -[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/ -[helm-chart-reference]: /reference/spaces-helm-reference/ -[otlphttp-exporter]: https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter -[datadog-exporter]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/datadogexporter -[splunk-exporter]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/splunkhecexporter diff --git 
a/self-hosted-spaces_versioned_docs/version-1.16/howtos/spaces-management.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/spaces-management.md deleted file mode 100644 index 67fee932e..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/spaces-management.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: Interacting with Disconnected Spaces -sidebar_position: 10 -description: Common operations in Spaces ---- - -## Spaces management - -### Create a Space - -To install an Upbound Space into a cluster, it's recommended you dedicate an entire Kubernetes cluster for the Space. You can use [up space init][up-space-init] to install an Upbound Space. Below is an example: - -```bash -up space init "v1.9.0" -``` -:::tip -For a full guide to get started with Spaces, read the [quickstart][quickstart] guide: -::: - -You can also install the helm chart for Spaces directly. In order for a Spaces install to succeed, you must install some prerequisites first and configure them. This includes: - -- UXP -- provider-helm and provider-kubernetes -- cert-manager - -Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you. - -```bash -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "v1.9.0" \ - --set "ingress.host=your-host.com" \ - --set "clusterType=eks" \ - --set "account=your-upbound-account" \ - --wait -``` -For a complete helm install tutorial, read the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp]. - -### Upgrade a Space - -To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `ver x.N.*` to version `ver x.N+1.*`. - -```bash -up space upgrade "v1.9.0" -``` - -You can also upgrade a Space by manually bumping the Helm chart version. Before -upgrading, review the release notes for any breaking changes or -special requirements: - -1. 
Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes] -2. Upgrade the Space by updating the helm chart version: - -```bash -helm -n upbound-system upgrade spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "v1.9.0" \ - --reuse-values \ - --wait -``` - -For major version upgrades or configuration changes, extract your current values -and adjust: - -```bash -# Extract current values to a file -helm -n upbound-system get values spaces > spaces-values.yaml - -# Upgrade with modified values -helm -n upbound-system upgrade spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "v1.9.0" \ - -f spaces-values.yaml \ - --wait -``` - -### Downgrade a Space - -To rollback a Space from one version to the previous, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `ver x.N.*` to version `ver x.N-1.*`. - -```bash -up space upgrade --rollback -``` - -You can also downgrade a Space manually using Helm by specifying an earlier version: - -```bash -helm -n upbound-system upgrade spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "v1.8.0" \ - --reuse-values \ - --wait -``` - -When downgrading, make sure to: -1. Check the [release notes][release-notes] for specific downgrade instructions -2. Verify compatibility between the downgraded Space and any control planes -3. Back up any critical data before proceeding - -### Uninstall a Space - -To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources. - -```bash -up space destroy -``` - -## Control plane management - -You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space. 
- -### Create a control plane - -To create a control plane in a Space using `up`, run the following: - -```bash -up ctp create ctp1 -``` - -You can also declare a new control plane like the example below and apply it to your Spaces cluster: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: ctp1 - namespace: default -spec: - writeConnectionSecretToRef: - name: kubeconfig-ctp1 - namespace: default -``` - -This manifest: - -- Creates a new control plane in the space called `ctp1`. -- Publishes the kubeconfig to connect to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1` - -### Connect to a control plane - -To connect to a control plane in a Space using `up`, run the following: - -```bash -up ctx ./default/ctp1 -``` - -The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run: - -```bash -up ctx - -``` - -If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane. - -```bash -kubectl get secret -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/.yaml -``` - -Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server): - -```bash -kubectl get providers --kubeconfig=/tmp/.yaml -``` - -### Configure a control plane - -Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience is like when a control plane runs in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space. - -Learn more in the [Spaces Git integration][spaces-git-integration] documentation. 
- -### List control planes - -To list all control planes in a Space using `up`, run the following: - -```bash -up ctp list -``` - -Or you can use Kubernetes-style semantics to list the control plane: - -```bash -kubectl get controlplanes -``` - - -### Delete a control plane - -To delete a control plane in a Space using `up`, run the following: - -```bash -up ctp delete ctp1 -``` - -Or you can use Kubernetes-style semantics to delete the control plane: - -```bash -kubectl delete controlplane ctp1 -``` - - -[up-space-init]: /reference/cli-reference -[quickstart]: / -[aws]: /self-hosted-spaces/howtos/self-hosted-spaces-deployment -[azure]:/self-hosted-spaces/howtos/self-hosted-spaces-deployment -[gcp]:/self-hosted-spaces/howtos/self-hosted-spaces-deployment -[up-space-upgrade]: /reference/cli-reference -[spaces-release-notes]: /reference/release-notes/spaces -[up-space-upgrade-1]: /reference/cli-reference -[release-notes]: /reference/release-notes/spaces -[up-space-destroy]: /reference/cli-reference -[up-cli]: /reference/cli-reference -[upbound-s-saas-environment]: /self-hosted-spaces/howtos/gitops -[spaces-git-integration]: /self-hosted-spaces/howtos/gitops diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/_category_.json b/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/_category_.json deleted file mode 100644 index 6d3366ad4..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Distributed Tracing", - "position": 50, - "collapsed": true, - "customProps": { - "plan": "business" - } -} diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/overview.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/overview.md deleted file mode 100644 index b9740d12f..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/overview.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Distributed Tracing 
Overview -sidebar_position: 1 -description: Configure distributed tracing in Spaces for end-to-end request visibility. ---- - -Spaces uses distributed tracing to provide end-to-end visibility into request -flow across the system. Traces help with debugging performance issues, -understanding request patterns, and correlating operations across multiple -services. - -## Architecture - -The Spaces tracing architecture consists of: - -- **Instrumented components**: spaces-router, spaces-api, and query-api -- **Protocol**: OTLP (OpenTelemetry Protocol) -- **Collector**: telemetry-spaces-collector (in-cluster by default), with - support for bringing your own collector - -## Enabling tracing - -Configure tracing through Helm values when installing or upgrading Spaces: - -```yaml -observability: - enabled: true - tracing: - enabled: true - sampling: - rate: 0.1 # Sample 10% of new traces (0.0-1.0) -``` - -## Sampling strategy - -All components use **parent-based sampling**: - - -- **With parent context**: If a `traceparent` header is present, the system - respects the parent's sampling decision, enabling proper distributed tracing - across services. -- **Root spans**: For new traces without a parent, the system samples based on - the configured rate. The default sampling rate is 10%. - - -## TLS configuration - -### In-cluster collector (default) - -By default, components connect to the in-cluster telemetry collector using the -Spaces CA. 
No extra TLS configuration is needed: - -```yaml -observability: - enabled: true - tracing: - enabled: true -``` - -### External collectors - -To send traces to an external OTLP collector, configure a custom endpoint and -CA bundle: - -```yaml -observability: - enabled: true - tracing: - enabled: true - endpoint: "otel-collector.monitoring" - port: 443 - tls: - # The secret must contain a key named 'ca.crt' with the PEM-encoded - # CA bundle - caBundleSecretRef: "otel-collector-ca" -``` - - -When you set `caBundleSecretRef`, components use the CA bundle from the -referenced Kubernetes secret. When not set, components use the Spaces CA for the -in-cluster collector. - - -## Component-specific tracing - -Each component adds custom attributes for filtering and correlation. See the -component-specific documentation for details: - -- [Spaces Router traces](spaces-router.md) - reverse proxy routing to control planes -- [Spaces API traces](spaces-api.md) - Spaces resource management API -- [Query API traces](query-api.md) - cross-control plane query service diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/query-api.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/query-api.md deleted file mode 100644 index b4daf840e..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/query-api.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Query API Traces -sidebar_position: 30 -description: Distributed tracing details for the Query API (Apollo) component. ---- - -Query API (Apollo) provides querying capabilities for Kubernetes resources -across multiple Crossplane control planes. - -For common tracing configuration, see the -[distributed tracing overview](overview.md). 
- -## Overview - -Service name: **`spaces-apollo`** - -Each request generates three span types: - -- Router ingress (routes to Apollo cluster) -- Apollo HTTP (processes query) -- PostgreSQL (executes SQL) - -## Architecture - -Requests flow through Spaces Router to Apollo: - -```text -Client → Spaces Router → Apollo (Query API) → PostgreSQL - │ │ │ │ - └───────────────────────────────────────────────→ Single Trace -``` - -Each request produces four spans: - -1. **Router ingress** - Envoy routes to `spaces-apollo` cluster -2. **Auth check** - External authorization validates request -3. **Apollo HTTP** - Query API processes request (service.name: `spaces-apollo`) -4. **PostgreSQL** - Database executes SQL - -### Enabling tracing - -Configure Query API tracing via Helm values: - -```yaml -apollo: - apollo: - observability: - enabled: true - tracing: - enabled: true - endpoint: "telemetry-spaces-collector.upbound-system.svc.cluster.local" - insecure: false - port: 4317 - sampling: - rate: 0.1 # 10% - tls: - caBundleSecretRef: spaces-ca -``` - -## Custom tags - -### Apollo spans - -Apollo exports spans with `service.name: query-api` and includes: - -| Attribute | Description | Example Value | -|-----------|-------------|---------------| -| `http.request.method` | HTTP method | `POST` | -| `url.path` | Request path | `/apis/query.spaces.upbound.io/v1alpha2/namespaces/default/queries` | -| `http.response.status_code` | Status code | `200` | -| `http.request.body.size` | Request size | `360` | -| `http.response.body.size` | Response size | `3702` | -| `server.address` | Server hostname | `127.0.0.1` | -| `network.peer.address` | Client IP | `192.168.2.11` | -| `user_agent.original` | User agent | `up-cli/v0.40.0 (darwin; arm64)` | - -### Database spans - -Database spans include PostgreSQL connection pool metrics: - -| Attribute | Description | Example Value | -|-----------|-------------|---------------| -| `db.system` | Database system | `postgresql` | -| `db.name` | 
Database name | `upbound` | -| `db.operation` | Operation type | `query` | -| `db.statement` | SQL query | Full SQL statement | -| `db.pool.total_conns` | Total connections | `1` | -| `db.pool.idle_conns` | Idle connections | `1` | -| `db.pool.acquired_conns` | Acquired connections | `0` | diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/spaces-api.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/spaces-api.md deleted file mode 100644 index 04745391d..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/spaces-api.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Spaces API Traces -sidebar_position: 20 -description: Distributed tracing details for the Spaces API component. ---- - -The Spaces API is the API server for managing Spaces resources -(ControlPlanes, SharedBackupConfigs, etc.) on the host cluster. - -For common tracing configuration, see the -[distributed tracing overview](overview.md). - -## Overview - -The Spaces API implements distributed tracing using **service.name**: -`spaces-api` - -## Custom tags - -The API adds custom tags to every span extracted from request headers. These -are in addition to standard span attributes: - -| Tag | Source | Description | Example | -|-----|--------|-------------|---------| -| `request.id` | `x-request-id` header | Request correlation ID | `a1b2c3d4-e5f6-7890-abcd-ef1234567890` | diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/spaces-router.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/spaces-router.md deleted file mode 100644 index b1f876d1c..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/tracing/spaces-router.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Spaces Router Traces -sidebar_position: 10 -description: Distributed tracing details for the Spaces Router (Envoy) component. ---- - -The Spaces Router uses Envoy as a reverse proxy to route traffic to control -planes. 
Envoy generates distributed traces through OpenTelemetry integration, -providing end-to-end visibility into request flow across the system. - -For common tracing configuration, see the -[distributed tracing overview](overview.md). - -## Overview - -The router implements distributed tracing using **service.name**: -`spaces-router` - -## Custom tags - -The router adds custom tags to every span to enable filtering and grouping by -control plane. These are in addition to standard HTTP span attributes: - -| Tag | Source | Description | Example | -|-----|--------|-------------|---------| -| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID | `b2b37aaa-ee55-492c-ba0c-4d561a6325fa` | -| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname | `vcluster.mxp-b2b37aaa-ee55-...` | -| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier (when present) | `a1b2c3d4-e5f6-7890-abcd-...` | - -These tags enable queries like: - -- "Show me all slow requests to control plane X" -- "Find errors for control planes in host cluster Y" -- "Trace a request across multiple control planes" - - -## Envoy-specific attributes - - -The router includes additional Envoy-specific attributes: - -| Attribute | Description | Example | -|-----------|-------------|---------| -| `upstream_cluster` | Target cluster name | `ctp-b2b37aaa-...-api-cluster` | -| `response_flags` | Envoy response flags | `-` (none), `UH` (no healthy upstream) | -| `node_id` | Envoy node identifier | `mxe-router` | -| `component` | Envoy component | `proxy` | -| `request_size` | Request body size in bytes | `0`, `1234` | -| `response_size` | Response body size in bytes | `1827` | diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/troubleshooting.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/troubleshooting.md deleted file mode 100644 index e39707be3..000000000 --- 
a/self-hosted-spaces_versioned_docs/version-1.16/howtos/troubleshooting.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: Troubleshooting -sidebar_position: 100 -description: A guide for troubleshooting an issue that occurs in a Space ---- - -Find guidance below on how to find solutions for issues you encounter when deploying and using an Upbound Space. Use the tips below as a supplement to the observability metrics discussed in the [Observability][observability] page. - -## General tips - -Most issues fall into two general categories: - -1. issues with the Spaces management plane -2. issues on a control plane - -If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter. - -### Spaces component layout - -Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces. It should look something like this: - -```bash -NAMESPACE NAME READY STATUS RESTARTS AGE -cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m -cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m -cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m -ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m -kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m -kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m -kube-system etcd-kind-control-plane 1/1 Running 0 26m -kube-system kindnet-8s7pq 1/1 Running 0 26m -kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m -kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m -kube-system kube-proxy-l68r8 1/1 Running 0 26m -kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m -local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 
Running 0 21m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m -mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m -upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m -upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m -upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m -upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m -upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m -upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m -``` - -What you are seeing is: - -- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. 
This includes the `spaces-controller`, `spaces-router`, and install of UXP. -- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. -- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. - - - -### Troubleshooting tips for the Spaces management plane - -Start by getting the status of all the pods in a Space: - -1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space -2. Get the status of all the pods in the Space: -```bash -kubectl get pods -A -``` -3. Scan the `Status` column to see if any of the pods report a status besides `Running`. -4. Scan the `Restarts` column to see if any of the pods have restarted. -5. If you notice a Status other than `Running` or see pods that restarted, you should investigate their events by running -```bash -kubectl describe pod -n -``` - -Next, inspect the status of objects and releases: - -1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space -2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events: -```bash -kubectl get objects -``` -3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events: -```bash -kubectl get releases -``` - -### Troubleshooting tips for control planes in a Space - -General troubleshooting in a control plane starts by fetching the events of the control plane: - -1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space -2. Run the following to fetch your control planes. -```bash -kubectl get ctp -``` -3. 
Describe the control plane by providing its name, found in the preceding instruction. -```bash -kubectl describe controlplanes.spaces.upbound.io -``` - -## Issues - - -### Your control plane is stuck in a 'creating' state - -#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec - -This error is emitted by a Helm release named `control-plane-host-policies` attempting to be installed by the Spaces software. The full error is: - -_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_ - -This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26 or later`). To resolve this issue, upgrade the host Kubernetes cluster version to 1.25 or later. - -### Your Spaces install fails - -#### Error: You tried to install a Space on a previous Crossplane installation - -If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite. 
- - - - -[observability]: /self-hosted-spaces/howtos/observability -[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/use-argo.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/use-argo.md deleted file mode 100644 index e469a144d..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/use-argo.md +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: Use ArgoCD Plugin -sidebar_position: 15 -description: A guide for integrating Argo with control planes in a Space. -aliases: - - /all-spaces/self-hosted-spaces/use-argo - - /deploy/disconnected-spaces/use-argo-flux - - /all-spaces/self-hosted-spaces/use-argo-flux - - /connect/use-argo ---- - - - -:::important -This feature is in preview and is off by default. To enable, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces: - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "features.alpha.argocdPlugin.enabled=true" -``` -::: - -Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts. - -:::tip -For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes]. 
-::: - -## On cluster Argo CD - -If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin: - - - - - - -```bash {hl_lines="3-4"} -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "features.alpha.argocdPlugin.enabled=true" \ - --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \ - --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" -``` - - - - - -```bash {hl_lines="7-8"} -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "features.alpha.argocdPlugin.enabled=true" \ - --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \ - --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \ - --wait -``` - - - - - - -The important flags are: - -- `features.alpha.argocdPlugin.enabled=true` -- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true` -- `features.alpha.argocdPlugin.target.secretNamespace=argocd` - -The first flag enables the feature and the second indicates the namespace on the cluster where you installed Argo CD. - -Be sure to [configure Argo][configure-argo] after it's installed. 
- -## External cluster Argo CD - -If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags: - - - - - - -```bash {hl_lines="3-7"} -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "features.alpha.argocdPlugin.enabled=true" \ - --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \ - --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \ - --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \ - --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \ - --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" -``` - - - - - -```bash {hl_lines="7-11"} -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "features.alpha.argocdPlugin.enabled=true" \ - --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \ - --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \ - --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \ - --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \ - --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \ - --wait -``` - - - - - -```bash -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "features.alpha.argocdPlugin.enabled=true" \ - --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \ - --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \ - --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \ - --set 
"features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \ - --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \ - --wait -``` - -The extra flags are: - -- `features.alpha.argocdPlugin.target.externalCluster.enabled=true` -- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true` -- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster` -- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig` - -These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install-time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`. - -Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it. - -Be sure to [configure Argo][configure-argo-1] after it's installed. - -## Configure Argo - -Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes. 
- -To configure Argo CD, connect to the cluster where you've installed it and edit the configmap: - -```bash -kubectl edit configmap argocd-cm -n argocd -``` - -Adjust the resource inclusions and exclusions under the `data` field of the configmap: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-cm - namespace: argocd -data: - resource.exclusions: | - - apiGroups: - - "*" - kinds: - - "*" - clusters: - - "*" - resource.inclusions: | - - apiGroups: - - "*" - kinds: - - Provider - - Configuration - clusters: - - "*" -``` - -The preceding configuration causes Argo to exclude syncing **all** resource group/kinds--except Crossplane `providers` and `configurations`--for **all** control planes. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`). - -## Control plane connection secrets - -To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD. - -### Basic Configuration - -In your control plane manifest, include the `writeConnectionSecretToRef` field: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: my-control-plane - namespace: my-control-plane-group -spec: - writeConnectionSecretToRef: - name: kubeconfig-my-control-plane - namespace: my-control-plane-group - # ... 
other control plane configuration -``` - -### Parameters - -The `writeConnectionSecretToRef` field requires two parameters: - -- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`) -- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter. - -Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management. - -This configuration enables Argo CD to automatically discover and manage resources on your control planes. - - -[gitops-with-control-planes]: /cloud-spaces/howtos/gitops-on-upbound -[configure-argo]: #configure-argo -[configure-argo-1]: #configure-argo -[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/ diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/_category_.json b/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/_category_.json deleted file mode 100644 index c5ecc93f6..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/_category_.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "label": "Workload Identity Configuration", - "position": 2, - "collapsed": true, - "customProps": { - "plan": "business" - } - -} - - diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/backup-restore-config.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/backup-restore-config.md deleted file mode 100644 index 776171161..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/backup-restore-config.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -title: Backup and Restore Workload ID -weight: 1 -description: Configure workload identity for Spaces Backup and Restore ---- 
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - - - -Workload-identity authentication lets you use access policies to grant temporary -AWS credentials to your Kubernetes pod with a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials. - -This guide walks you through creating an IAM trust role policy and applying it -to your EKS cluster to handle backup and restore storage. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary Azure credentials to your Kubernetes pod based on -a service account. Assigning managed identities and service accounts allows the pod to -authenticate with Azure resources dynamically and much more securely than static credentials. - -This guide walks you through creating a managed identity and federated credential for your AKS -cluster to handle backup and restore storage. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary GCP credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -access cloud resources dynamically and much more securely than static credentials. - -This guide walks you through configuring workload identity for your GKE -cluster to handle backup and restore storage. - - - -## Prerequisites - - -To set up a workload-identity, you'll need: - - -- A self-hosted Space cluster -- Administrator access in your cloud provider -- Helm and `kubectl` - -## About the backup and restore component - -The `mxp-controller` component handles backup and restore workloads. 
It needs to -access your cloud storage to store and retrieve backups. By default, this -component runs in each control plane's host namespace. - -## Configuration - - - -Upbound supports workload-identity configurations in AWS with IAM Roles for -Service Accounts and EKS pod identity association. - -#### IAM Roles for Service Accounts (IRSA) - -With IRSA, you can associate a Kubernetes service account in an EKS cluster with -an AWS IAM role. Upbound authenticates workloads with that service account as -the IAM role using temporary credentials instead of static role credentials. -IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with -the IAM role's temporary credentials. IRSA uses the `eks.amazon.aws/role-arn` -annotation to link the service account and the IAM role. - -First, create an IAM role with appropriate permissions to access your S3 bucket: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:ListBucket", - "s3:DeleteObject" - ], - "Resource": [ - "arn:aws:s3:::${YOUR_BACKUP_BUCKET}", - "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*" - ] - } - ] -} -``` - -Next, ensure your EKS cluster has an OIDC identity provider: - -```shell -eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve -``` - -Configure the IAM role trust policy with the namespace for each -provisioned control plane. 
- -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com", - "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller" - } - } - } - ] -} -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the Backup and Restore component: - -```shell ---set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}" -``` - -This command allows the backup and restore component to authenticate with your -dedicated IAM role in your EKS cluster environment. - -#### EKS pod identities - -Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow -you to create a pod identity association with your Kubernetes namespace, a -service account, and an IAM role, which allows the EKS control plane to -automatically handle the credential exchange. 
- -First, create an IAM role with appropriate permissions to access your S3 bucket: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:ListBucket", - "s3:DeleteObject" - ], - "Resource": [ - "arn:aws:s3:::${YOUR_BACKUP_BUCKET}", - "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*" - ] - } - ] -} -``` - -When you install or upgrade your Space with Helm, add the backup/restore values: - -```shell -helm upgrade spaces spaces-helm-chart \ - --set "billing.enabled=true" \ - --set "backup.enabled=true" \ - --set "backup.storage.provider=aws" \ - --set "backup.storage.aws.region= ${YOUR_AWS_REGION}" \ - --set "backup.storage.aws.bucket= ${YOUR_BACKUP_BUCKET}" -``` - -After Upbound provisions your control plane, create a Pod Identity Association -with the `aws` CLI: - -```shell -aws eks create-pod-identity-association \ - --cluster-name ${YOUR_CLUSTER_NAME} \ - --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \ - --service-account mxp-controller \ - --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role -``` - - - - - -Upbound supports workload-identity configurations in Azure with Azure's built-in -workload identity feature. 
- -#### Prepare your cluster - -First, enable the OIDC issuer and workload identity in your AKS cluster: - -```shell -az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity -``` - -Next, find and store the OIDC issuer URL as an environment variable: - -```shell -export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" -``` - -#### Create a User-Assigned Managed Identity - -Create a new managed identity to associate with the backup and restore component: - -```shell -az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} -``` - -Retrieve the client ID and store it as an environment variable: - -```shell -export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" -``` - -Grant the managed identity you created to access your Azure Storage account: - -```shell -az role assignment create \ - --role "Storage Blob Data Contributor" \ - --assignee ${USER_ASSIGNED_CLIENT_ID} \ - --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} -``` - -#### Apply the managed identity role - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the backup and restore component: - -```shell ---set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${YOUR_USER_ASSIGNED_CLIENT_ID}" ---set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -#### Create a Federated Identity credential - -```shell -az identity federated-credential create \ - --name backup-restore-federated-identity \ - --identity-name backup-restore-identity \ - --resource-group 
${YOUR_RESOURCE_GROUP} \ - --issuer ${AKS_OIDC_ISSUER} \ - --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller -``` - - - - - -Upbound supports workload-identity configurations in GCP with IAM principal -identifiers and service account impersonation. - -#### Prepare your cluster - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -#### Create a Google Service Account - -Create a service account for the backup and restore component: - -```shell -gcloud iam service-accounts create backup-restore-sa \ - --display-name "Backup Restore Service Account" \ - --project ${YOUR_PROJECT_ID} -``` - -Grant the service account access to your Google Cloud Storage bucket: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role "roles/storage.objectAdmin" -``` - -#### Configure Workload Identity - -Create an IAM binding to grant the Kubernetes service account access to the Google service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" -``` - -#### Apply the service account configuration - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the backup and restore component: - -```shell ---set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell 
-kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - -Verify the `mxp-controller` pod is running: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller -``` - -## Restart workload - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - - - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using GCP workload identity. - - - -```shell -kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -``` - -## Use cases - - -Configuring backup and restore with workload identity eliminates the need for -static credentials in your cluster and the overhead of credential rotation. -These benefits are helpful in: - -* Disaster recovery scenarios -* Control plane migration -* Compliance requirements -* Rollbacks after unsuccessful upgrades - -## Next steps - -Now that you have a workload identity configured for the backup and restore -component, visit the [Backup Configuration][backup-restore-guide] documentation. 
- -Other workload identity guides are: -* [Billing][billing] -* [Shared Secrets][secrets] - -[backup-restore-guide]: /self-hosted-spaces/howtos/backup-and-restore -[billing]: /self-hosted-spaces/howtos/workload-id/billing-config -[secrets]: /self-hosted-spaces/howtos/workload-id/eso-config diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/billing-config.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/billing-config.md deleted file mode 100644 index 737f446ab..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/billing-config.md +++ /dev/null @@ -1,454 +0,0 @@ ---- -title: Billing Workload ID -weight: 1 -description: Configure workload identity for Spaces Billing ---- -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary AWS credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -assume the IAM role dynamically and much more securely than static credentials. - -This guide walks you through creating an IAM trust role policy and applying it to your EKS -cluster for billing in your Space cluster. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary Azure credentials to your Kubernetes pod based on -a service account. Assigning managed identities and service accounts allows the pod to -authenticate with Azure resources dynamically and much more securely than static credentials. - -This guide walks you through creating a managed identity and federated credential for your AKS -cluster for billing in your Space cluster. 
- - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary GCP credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -access cloud resources dynamically and much more securely than static -credentials. - -This guide walks you through configuring workload identity for your GKE -cluster's billing component. - - - -## Prerequisites - - -To set up a workload-identity, you'll need: - - -- A self-hosted Space cluster -- Administrator access in your cloud provider -- Helm and `kubectl` - -## About the billing component - -The `vector.dev` component handles billing metrics collection in spaces. It -stores account data in your cloud storage. By default, this component runs in -each control plane's host namespace. - -## Configuration - - - -Upbound supports workload-identity configurations in AWS with IAM Roles for -Service Accounts and EKS pod identity association. - -#### IAM Roles for Service Accounts (IRSA) - -With IRSA, you can associate a Kubernetes service account in an EKS cluster with -an AWS IAM role. Upbound authenticates workloads with that service account as -the IAM role using temporary credentials instead of static role credentials. -IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with -the IAM role's temporary credentials. IRSA uses the `eks.amazon.aws/role-arn` -annotation to link the service account and the IAM role. 
- -**Create an IAM role and trust policy** - -First, create an IAM role appropriate permissions to access your S3 bucket: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:ListBucket", - "s3:DeleteObject" - ], - "Resource": [ - "arn:aws:s3:::${YOUR_BILLING_BUCKET}", - "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*" - ] - } - ] -} -``` - -You must configure the IAM role trust policy with the exact match for each -provisioned control plane. An example of a trust policy for a single control -plane is below: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - ":aud": "sts.amazonaws.com", - ":sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector" - } - } - } - ] -} -``` - -**Configure the EKS OIDC provider** - -Next, ensure your EKS cluster has an OIDC identity provider: - -```shell -eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve -``` - -**Apply the IAM role** - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the Billing component: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=aws" ---set "billing.storage.aws.region=${YOUR_AWS_REGION}" ---set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" ---set "billing.storage.secretRef.name=" ---set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}" -``` - -:::important -You **must** set the `billing.storage.secretRef.name` to an empty string to -enable workload identity for the billing component -::: - -#### EKS pod identities - -Upbound also supports EKS Pod Identity configuration. 
EKS Pod Identities allow -you to create a pod identity association with your Kubernetes namespace, a -service account, and an IAM role, which allows the EKS control plane to -automatically handle the credential exchange. - -**Create an IAM role** - -First, create an IAM role appropriate permissions to access your S3 bucket: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::${YOUR_BILLING_BUCKET}", - "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*" - ] - } - ] -} -``` - -**Configure your Space with Helm** - -When you install or upgrade your Space with Helm, add the billing values: - -```shell -helm upgrade spaces spaces-helm-chart \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=aws" \ - --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \ - --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \ - --set "billing.storage.secretRef.name=" -``` - -**Create a Pod Identity Association** - -After Upbound provisions your control plane, create a Pod Identity Association -with the `aws` CLI: - -```shell -aws eks create-pod-identity-association \ - --cluster-name ${YOUR_CLUSTER_NAME} \ - --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \ - --service-account vector \ - --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME} -``` - - - - - -Upbound supports workload-identity configurations in Azure with Azure's built-in -workload identity feature. 
- -First, enable the OIDC issuer and workload identity in your AKS cluster: - -```shell -az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity -``` - -Next, find and store the OIDC issuer URL as an environment variable: - -```shell -export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" -``` - -Create a new managed identity to associate with the billing component: - -```shell -az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} -``` - -Retrieve the client ID and store it as an environment variable: - -```shell -export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" -``` - -Grant the managed identity you created to access your Azure Storage account: - -```shell -az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the billing component: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=azure" ---set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}" ---set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}" ---set "billing.storage.secretRef.name=" ---set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${SPACES_BILLING_APP_ID}" ---set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -Create a federated credential to establish trust between the managed identity -and your AKS OIDC provider: - -```shell -az 
identity federated-credential create \ - --name billing-federated-identity \ - --identity-name billing-identity \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --issuer ${AKS_OIDC_ISSUER} \ - --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector -``` - - - - - -Upbound supports workload-identity configurations in GCP with IAM principal -identifiers or service account impersonation. - -#### IAM principal identifiers - -IAM principal identifiers allow you to grant permissions directly to -Kubernetes service accounts without additional annotation. Upbound recommends -this approach for ease-of-use and flexibility. - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, configure your Spaces installation with the Spaces Helm chart parameters: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=gcp" ---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" ---set "billing.storage.secretRef.name=" -``` - -:::important -You **must** set the `billing.storage.secretRef.name` to an empty string to -enable workload identity for the billing component. 
-::: - -Grant the necessary permissions to your Kubernetes service account: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \ - --role="roles/storage.objectAdmin" -``` - -Enable uniform bucket-level access on your storage bucket: - -```shell -gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access -``` - -#### Service account impersonation - -Service account impersonation allows you to link a Kubernetes service account to -a GCP service account. The Kubernetes service account assumes the permissions of -the GCP service account you specify. - -Enable workload id federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, create a dedicated service account for your billing operations: - -```shell -gcloud iam service-accounts create billing-sa \ - --project=${YOUR_PROJECT_ID} -``` - -Grant storage permissions to the service account you created: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role="roles/storage.objectAdmin" -``` - -Link the Kubernetes service account to the GCP service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role="roles/iam.workloadIdentityUser" \ - --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]" -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the billing component: - -```shell ---set "billing.enabled=true" ---set 
"billing.storage.provider=gcp" ---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" ---set "billing.storage.secretRef.name=" ---set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - -Verify the `vector` pod is running: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector -``` - -## Restart workload - - - -You must manually restart a workload's pod when you add the -`eks.amazonaws.com/role-arn key` annotation to the running pod's service -account. - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -GCP workload identity doesn't require pod restarts after configuration changes. -If you do need to restart the workload, use the `kubectl` command to force the -component restart: - - - -```shell -kubectl rollout restart deployment vector -``` - - -## Use cases - - -Using workload identity authentication for billing eliminates the need for static -credentials in your cluster as well as the overhead of credential rotation. 
-These benefits are helpful in: - -* Resource usage tracking across teams/projects -* Cost allocation for multi-tenant environments -* Financial auditing requirements -* Capacity billing and resource optimization -* Automated billing workflows - -## Next steps - -Now that you have workload identity configured for the billing component, visit -the [Billing guide][billing-guide] for more information. - -Other workload identity guides are: -* [Backup and restore][backuprestore] -* [Shared Secrets][secrets] - -[billing-guide]: /self-hosted-spaces/howtos/billing -[backuprestore]: /self-hosted-spaces/howtos/workload-id/backup-restore-config -[secrets]: /self-hosted-spaces/howtos/workload-id/eso-config diff --git a/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/eso-config.md b/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/eso-config.md deleted file mode 100644 index 90ac6ba68..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/howtos/workload-id/eso-config.md +++ /dev/null @@ -1,503 +0,0 @@ ---- -title: Shared Secrets Workload ID -weight: 1 -description: Configure workload identity for Spaces Shared Secrets ---- -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary AWS credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -assume the IAM role dynamically and much more securely than static credentials. - -This guide walks you through creating an IAM trust role policy and applying it to your EKS -cluster for secret sharing with Kubernetes. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. 
Workload identity -authentication grants temporary Azure credentials to your Kubernetes pod based on -a service account. Assigning managed identities and service accounts allows the pod to -authenticate with Azure resources dynamically and much more securely than static credentials. - -This guide walks you through creating a managed identity and federated credential for your AKS -cluster for shared secrets in your Space cluster. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary GCP credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -access cloud resources dynamically and much more securely than static -credentials. - -This guide walks you through configuring workload identity for your GKE -cluster's Shared Secrets component. - - - -## Prerequisites - - -To set up a workload-identity, you'll need: - - -- A self-hosted Space cluster -- Administrator access in your cloud provider -- Helm and `kubectl` - - -## About the Shared Secrets component - - - - -The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs to access -your external secrets management service like AWS Secrets Manager. - -To configure your shared secrets workflow controller, you must: - -* Annotate the Kubernetes service account to associate it with a cloud-side - principal (such as an IAM role, service account, or enterprise application). The workload must then - use this service account. -* Label the workload (pod) to allow the injection of a temporary credential set, - enabling authentication. - - - - - -The External Secrets Operator (ESO) component runs in each control plane's host -namespace as `external-secrets-controller`. It synchronizes secrets from -external APIs into Kubernetes secrets. 
Shared secrets allow you to manage -credentials outside your Kubernetes cluster while making them available to your -application - - - - - -The External Secrets Operator (ESO) component runs in each control plane's host -namespace as `external-secrets-controller`. It synchronizes secrets from -external APIs into Kubernetes secrets. Shared secrets allow you to manage -credentials outside your Kubernetes cluster while making them available to your -application - - - -## Configuration - - - -Upbound supports workload-identity configurations in AWS with IAM Roles for -Service Accounts or EKS pod identity association. - -#### IAM Roles for Service Accounts (IRSA) - -With IRSA, you can associate a Kubernetes service account in an EKS cluster with -an AWS IAM role. Upbound authenticates workloads with that service account as -the IAM role using temporary credentials instead of static role credentials. -IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with -the IAM role's temporary credentials. IRSA uses the `eks.amazon.aws/role-arn` -annotation to link the service account and the IAM role. - -**Create an IAM role and trust policy** - -First, create an IAM role with appropriate permissions to access AWS Secrets Manager: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "secretsmanager:GetSecretValue", - "secretsmanager:DescribeSecret", - "ssm:GetParameter" - ], - "Resource": [ - "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*", - "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*" - ] - } - ] -} -``` - -You must configure the IAM role trust policy with the exact match for each -provisioned control plane. 
An example of a trust policy for a single control -plane is below: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - ":aud": "sts.amazonaws.com" - }, - "StringLike": { - ":sub": "system:serviceaccount:*:external-secrets-controller" - } - } - } - ] -} -``` - -**Configure the EKS OIDC provider** - -Next, ensure your EKS cluster has an OIDC identity provider: - -```shell -eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve -``` - -**Apply the IAM role** - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the shared secrets component: - -```yaml ---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}" -``` - -This command allows the shared secrets component to authenticate with your -dedicated IAM role in your EKS cluster environment. - -#### EKS pod identities - -Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow -you to create a pod identity association with your Kubernetes namespace, a -service account, and an IAM role, which allows the EKS control plane to -automatically handle the credential exchange. 
- -**Create an IAM role** - -First, create an IAM role with appropriate permissions to access AWS Secrets Manager: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "secretsmanager:GetSecretValue", - "secretsmanager:DescribeSecret", - "ssm:GetParameter" - ], - "Resource": [ - "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*", - "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*" - ] - } - ] -} -``` - -**Configure your Space with Helm** - -When you install or upgrade your Space with Helm, add the shared secrets value: - -```shell -helm upgrade spaces spaces-helm-chart \ - --set "sharedSecrets.enabled=true" -``` - -**Create a Pod Identity Association** - -After Upbound provisions your control plane, create a Pod Identity Association -with the `aws` CLI: - -```shell -aws eks create-pod-identity-association \ - --cluster-name ${YOUR_CLUSTER_NAME} \ - --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \ - --service-account external-secrets-controller \ - --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME} -``` - - - - - -Upbound supports workload-identity configurations in Azure with Azure's built-in -workload identity feature. 
- -First, enable the OIDC issuer and workload identity in your AKS cluster: - -```shell -az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity -``` - -Next, find and store the OIDC issuer URL as an environment variable: - -```shell -export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" -``` - -Create a new managed identity to associate with the shared secrets component: - -```shell -az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} -``` - -Retrieve the client ID and store it as an environment variable: - -```shell -export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" -``` - -Grant the managed identity you created to access your Azure Storage account: - -```shell -az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \ - --secret-permissions get list -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the shared secrets component: - -```shell ---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}" ---set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -Next, create a federated credential to establish trust between the managed identity -and your AKS OIDC provider: - -```shell -az identity federated-credential create \ - --name secrets-federated-identity \ - --identity-name secrets-identity \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --issuer ${AKS_OIDC_ISSUER} \ - --subject 
system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller -``` - - - - - -Upbound supports workload-identity configurations in GCP with IAM principal -identifiers or service account impersonation. - -#### IAM principal identifiers - -IAM principal identifiers allow you to grant permissions directly to -Kubernetes service accounts without additional annotation. Upbound recommends -this approach for ease-of-use and flexibility. - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, grant the necessary permissions to your Kubernetes service account: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \ - --role="roles/secretmanager.secretAccessor" -``` - -#### Service account impersonation - -Service account impersonation allows you to link a Kubernetes service account to -a GCP service account. The Kubernetes service account assumes the permissions of -the GCP service account you specify. 
- -Enable workload id federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, create a dedicated service account for your secrets operations: - -```shell -gcloud iam service-accounts create secrets-sa \ - --project=${YOUR_PROJECT_ID} -``` - -Grant secret access permissions to the service account you created: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role="roles/secretmanager.secretAccessor" -``` - -Link the Kubernetes service account to the GCP service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role="roles/iam.workloadIdentityUser" \ - --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]" -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the shared secrets component: - -```shell ---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - - - -Verify the `external-secrets` pod is running correctly: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets -``` - - - - - -Verify the External Secrets Operator pod is running correctly: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets -``` - - - - - -Verify the `external-secrets` pod is running correctly: - -```shell -kubectl get pods -n 
${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets -``` - - - -## Restart workload - - - -You must manually restart a workload's pod when you add the -`eks.amazonaws.com/role-arn key` annotation to the running pod's service -account. - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -GCP workload identity doesn't require pod restarts after configuration changes. -If you do need to restart the workload, use the `kubectl` command to force the -component restart: - - - -```shell -kubectl rollout restart deployment external-secrets -``` - -## Use cases - - - - -Shared secrets with workload identity eliminates the need for static credentials -in your cluster. These benefits are particularly helpful in: - -* Secure application credentials management -* Database connection string storage -* API token management -* Compliance with secret rotation security standards -* Multi-environment configuration with centralized secret management - - - - - -Using workload identity authentication for shared secrets eliminates the need for static -credentials in your cluster as well as the overhead of credential rotation. -These benefits are particularly helpful in: - -* Secure application credentials management -* Database connection string storage -* API token management -* Compliance with secret rotation security standards - - - - - -Configuring the external secrets operator with workload identity eliminates the need for -static credentials in your cluster and the overhead of credential rotation. 
-These benefits are particularly helpful in: - -* Secure application credentials management -* Database connection string storage -* API token management -* Compliance with secret rotation security standards - - - -## Next steps - -Now that you have workload identity configured for the shared secrets component, visit -the [Shared Secrets][eso-guide] guide for more information. - -Other workload identity guides are: -* [Backup and restore][backuprestore] -* [Billing][billing] - -[eso-guide]: /self-hosted-spaces/howtos/secrets-management -[backuprestore]: /self-hosted-spaces/howtos/workload-id/backup-restore-config -[billing]: /self-hosted-spaces/howtos/workload-id/billing-config diff --git a/self-hosted-spaces_versioned_docs/version-1.16/overview/_category_.json b/self-hosted-spaces_versioned_docs/version-1.16/overview/_category_.json deleted file mode 100644 index 54bb16430..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/overview/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Overview", - "position": 0 -} diff --git a/self-hosted-spaces_versioned_docs/version-1.16/overview/index.md b/self-hosted-spaces_versioned_docs/version-1.16/overview/index.md deleted file mode 100644 index f1fd4de42..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/overview/index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Self-Hosted Spaces -sidebar_position: 0 ---- - -Upbound Spaces is the platform for running managed Crossplane control planes at -scale. Spaces handles the lifecycle, networking, and operations of your control -planes so you can focus on building your platform. - -Crossplane is the open-source foundation that enables infrastructure -provisioning and management through Kubernetes APIs. The Crossplane control -plane is a Kubernetes cluster running Crossplane that can provision and manage -resources across multiple providers. 
- -Upbound can run as a hosted service in our Cloud Space or you can host your own -Upbound instance as a Self-Hosted Space. - -## Cloud Spaces - -Upbound hosts and manages the Spaces infrastructure for you. Cloud Spaces offers -two forms of tenancy: - -- **Cloud Spaces**: Multi-tenant, Upbound-hosted and Upbound-managed environment. -- **Dedicated Spaces**: Single-tenant, Upbound-hosted and Upbound-managed - environment with additional isolation guarantees. - -Use Cloud Spaces if you want a fully managed SaaS experience with no cluster to -maintain. See the [Cloud Spaces documentation](/cloud-spaces/overview/). - -## Self-Hosted Spaces (you are here) - -You run the Spaces software on your own Kubernetes cluster. You deploy and manage Spaces on your own cluster. - -Use Self-Hosted Spaces if you need control over your infrastructure or data -residency. - -## Get Started with Self-Hosted Spaces - -- **[Concepts](/self-hosted-spaces/concepts/control-planes/)** — Core concepts for Spaces -- **[How-To Guides](/self-hosted-spaces/howtos/auto-upgrade/)** — Step-by-step guides for operating Spaces -- **[API Reference](/self-hosted-spaces/reference/)** — API specifications and resources diff --git a/self-hosted-spaces_versioned_docs/version-1.16/reference/_category_.json b/self-hosted-spaces_versioned_docs/version-1.16/reference/_category_.json deleted file mode 100644 index 4a6a139c4..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/reference/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "Spaces API", - "position": 1, - "collapsed": true -} diff --git a/self-hosted-spaces_versioned_docs/version-1.16/reference/index.md b/self-hosted-spaces_versioned_docs/version-1.16/reference/index.md deleted file mode 100644 index 8336314ed..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/reference/index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Spaces API Reference -description: Documentation for the Spaces API resources (v1.16 - Latest) 
-sidebar_position: 1 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. - - -## Control Planes -### Control Planes - - -## Observability -### Shared Telemetry Configs - - -## `pkg` -### Controller Revisions - - -### Controller Runtime Configs - - -### Controllers - - -### Remote Configuration Revisions - - -### Remote Configurations - - -## Proxies -### Proxies - - -## References -### Referenced Objects - - -## Scheduling -### Environments - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/self-hosted-spaces_versioned_docs/version-1.16/self-hosted-spaces-quickstart.md b/self-hosted-spaces_versioned_docs/version-1.16/self-hosted-spaces-quickstart.md deleted file mode 100644 index 4cf4a022e..000000000 --- a/self-hosted-spaces_versioned_docs/version-1.16/self-hosted-spaces-quickstart.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Quickstart with Kind -sidebar_position: 1 -description: Deploy a self-hosted Upbound Space on a local kind cluster -tier: "business" ---- - -Get started with Upbound Spaces using a local `kind` cluster. This guide walks you through deploying a self-hosted Space, creating your first control plane, and optionally connecting your Space to the Upbound Console. - -:::info -Self-hosted Spaces are a business critical feature of Upbound and require a license token. [Contact Upbound](https://www.upbound.io/contact) if you want to try out self-hosted Spaces. 
-::: - -## Prerequisites - -- An Upbound organization account string, provided by your Upbound account representative -- A `token.json` license, provided by your Upbound account representative -- [`kind`](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) installed locally -- The [`up` CLI](../../../manuals/cli/overview/) v0.37.0 or newer - -## Provision a kind cluster - -Create a `kind` cluster with the required ingress configuration for Spaces: - -```bash -cat < -``` - -Export the path to your license token file: - -```bash -export SPACES_TOKEN_PATH="/path/to/token.json" -``` - -Set the Spaces version you want to install: - -```bash -export SPACES_VERSION= -``` - -## Install Spaces - -Make sure your `kubectl` context is pointed at the `kind` cluster, then install the Spaces software: - -```bash -up space init --organization="${UPBOUND_ACCOUNT}" \ - --token-file="${SPACES_TOKEN_PATH}" \ - "v${SPACES_VERSION}" -``` - -The `up` CLI automatically detects missing prerequisites and prompts you to install them. - -## Connect to your Space - -Point your kubeconfig context at the new Space: - -```bash -up ctx disconnected/kind-kind -``` - -## Create your first control plane - -Create a control plane in your Space: - -```bash -up ctp create controlplane1 -``` - -The first control plane takes around 5 minutes to reach a `READY` state. Check the status with: - -```bash -up ctp list -``` - -## Connect to your control plane - -With your kubeconfig still pointed at the Space cluster, connect to the control plane: - -```bash -up ctx ./default/controlplane1 -``` - -Verify the connection by listing CRDs on the control plane: - -```bash -kubectl get crds -``` - -To disconnect and return to your previous context: - -```bash -up ctx - -``` - -:::tip -Learn more about navigating Upbound with the [`up ctx` command reference](../../../manuals/cli/overview/). 
-::: - -## Connect your Space to Upbound (optional) - -You can connect your self-hosted Space to the Upbound Console for a unified operations and debugging experience. - -### Prerequisites - -- An existing Upbound account -- The `up` CLI installed and logged into your organization -- `kubectl` configured with the kubecontext of your Space cluster - -### Enable the Query API - -Connecting a Space requires the Query API to be enabled. Pass the following flags when running `up space init`: - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - --set "features.alpha.apollo.enabled=true" \ - --set "features.alpha.apollo.storage.postgres.create=true" -``` - -This creates a PostgreSQL cluster managed by [CloudNativePG](https://cloudnative-pg.io/). To use your own PostgreSQL instance instead, set `features.alpha.apollo.storage.postgres.create=false` and supply connection details at `features.alpha.apollo.storage.postgres.connection`. - -### Connect the Space - -Set a name for your self-hosted Space: - -```bash -export UPBOUND_SPACE_NAME=your-self-hosted-space -``` - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - - -Log into Upbound: - -```bash -up login -``` - -Connect the Space to the Console: - -```bash -up space connect "${UPBOUND_SPACE_NAME}" -``` - -This installs a Connect agent, creates a service account, and configures permissions in the `upbound-system` namespace of your Space. 
- - - - -Export your Upbound org name (run `up org list` to find it): - -```bash -export UPBOUND_ORG_NAME=your-org-name -``` - -Create a robot and export its token: - -```bash -up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for connect agent" -export UPBOUND_TOKEN=$(up robot token create "${UPBOUND_SPACE_NAME}" "${UPBOUND_SPACE_NAME}-token" -ojson | jq -r .token) -``` - -Create a secret containing the robot token: - -```bash -kubectl create secret -n upbound-system generic connect-token \ - --from-literal=token="${UPBOUND_TOKEN}" -``` - -Log into the Helm OCI registry using your license token: - -```bash -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin -``` - -Install the connect agent: - -```bash -helm -n upbound-system upgrade --install agent \ - oci://xpkg.upbound.io/spaces-artifacts/agent \ - --version "0.0.0-1116.g14cbfe6" \ - --set "global.space=${UPBOUND_SPACE_NAME}" \ - --set "global.organization=${UPBOUND_ORG_NAME}" \ - --set "global.tokenSecret=connect-token" \ - --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ - --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ - --set "registration.enabled=true" \ - --set "imagePullSecrets[0].name=upbound-pull-secret" \ - --set "billing.enabled=false" \ - --wait -``` - - - - -### View your Space in the Console - -Go to the [Upbound Console](https://console.upbound.io), log in, and select your newly connected Space from the Space selector. - -:::note -A self-hosted Space can only be connected to a single organization at a time. 
-::: diff --git a/self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json b/self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json deleted file mode 100644 index 9c534e861..000000000 --- a/self-hosted-spaces_versioned_sidebars/version-1.16-sidebars.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "sidebar": [ - { - "type": "doc", - "id": "overview/index", - "label": "Overview" - }, - { - "type": "doc", - "id": "self-hosted-spaces-quickstart" - }, - { - "type": "category", - "label": "Concepts", - "items": [ - "concepts/control-planes", - "concepts/groups" - ] - }, - { - "type": "category", - "label": "Deploy", - "items": [ - "howtos/deployment-reqs", - "howtos/self-hosted-spaces-deployment", - "howtos/certs", - "howtos/ingress", - "howtos/ingress-nginx-migration", - "howtos/mirror-images", - "howtos/attach-detach" - ] - }, - { - "type": "category", - "label": "Configure", - "items": [ - "howtos/configure-ha", - "howtos/oidc-configuration", - "howtos/hub-rbac", - "howtos/scaling-resources", - "howtos/spaces-management" - ] - }, - { - "type": "category", - "label": "Control Planes", - "items": [ - "howtos/managed-service", - "howtos/control-plane-topologies", - "howtos/auto-upgrade", - "howtos/declarative-ctps", - "howtos/controllers", - "howtos/migrating-to-mcps", - "howtos/simulations" - ] - }, - { - "type": "category", - "label": "Connect", - "items": [ - "howtos/api-connector", - "howtos/ctp-connector", - "howtos/mcp-connector-guide", - "howtos/query-api", - "howtos/deploy-query-api" - ] - }, - { - "type": "category", - "label": "Observe & Debug", - "items": [ - "howtos/observability", - "howtos/space-observability", - { - "type": "category", - "label": "Tracing", - "items": [ - "howtos/tracing/overview", - "howtos/tracing/query-api", - "howtos/tracing/spaces-api", - "howtos/tracing/spaces-router" - ] - }, - "howtos/ctp-audit-logs", - "howtos/debugging-a-ctp", - "howtos/troubleshooting" - ] - }, - { - "type": "category", - "label": "GitOps & 
Automation", - "items": [ - "howtos/automation-and-gitops/overview", - "howtos/gitops", - "howtos/use-argo" - ] - }, - { - "type": "category", - "label": "Data & Security", - "items": [ - "howtos/dr", - "howtos/backup-and-restore", - "howtos/secrets-management", - { - "type": "category", - "label": "Workload Identity", - "items": [ - "howtos/workload-id/backup-restore-config", - "howtos/workload-id/billing-config", - "howtos/workload-id/eso-config" - ] - } - ] - }, - { - "type": "category", - "label": "Billing & Licensing", - "items": [ - "howtos/billing", - "howtos/capacity-licensing" - ] - }, - { - "type": "doc", - "id": "reference/index", - "label": "API Reference" - } - ] -} diff --git a/self-hosted-spaces_versions.json b/self-hosted-spaces_versions.json index c21a9e8fb..17cecbce2 100644 --- a/self-hosted-spaces_versions.json +++ b/self-hosted-spaces_versions.json @@ -1 +1 @@ -["1.16","1.15","1.14", "1.13"] +["1.15", "1.14", "1.13"] diff --git a/src/theme/DocSidebar/Desktop/Content/index.js b/src/theme/DocSidebar/Desktop/Content/index.js index 477b212a3..fd0bf37e0 100644 --- a/src/theme/DocSidebar/Desktop/Content/index.js +++ b/src/theme/DocSidebar/Desktop/Content/index.js @@ -13,11 +13,13 @@ import styles from './styles.module.css'; const versionsJson = require('../../../../../self-hosted-spaces_versions.json'); const LATEST = 'latest'; +// Must match `versions.current.label` for the self-hosted-spaces plugin in docusaurus.config.js. +const LATEST_VERSION = '1.16'; -const versions = versionsJson.map((version, index) => ({ - label: index === 0 ? `${version} (Latest)` : version, - value: index === 0 ? 
LATEST : version, -})); +const versions = [ + { label: `${LATEST_VERSION} (Latest)`, value: LATEST }, + ...versionsJson.map((version) => ({ label: version, value: version })), +]; function getVersionFromPath(pathname) { const segments = pathname.split('/').filter(Boolean); From a50dc359ad5cff8ca0f6befb8c9f05206362a50f Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Thu, 7 May 2026 16:40:18 -0400 Subject: [PATCH 2/2] corrects dark mode on version dropdown --- src/css/custom.css | 22 +++++++++++++++++++ src/theme/DocSidebar/Desktop/Content/index.js | 6 ++--- src/theme/Root.js | 2 +- 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/src/css/custom.css b/src/css/custom.css index 84a17742b..5f585514d 100644 --- a/src/css/custom.css +++ b/src/css/custom.css @@ -271,6 +271,28 @@ nav[aria-label="Main"] .text-neutral-0, nav[aria-label="Main"] .text-neutral-0:h color: rgb(255 255 255 / var(--tw-text-opacity)); } +/* Spaces sidebar version selector — @upbound/elements Select uses Tailwind */ +/* utility classes (bg-background, border-input, etc.) that are compiled with */ +/* hardcoded light values and no `.dark:` variants, so dark mode needs explicit overrides. 
*/ +html[data-theme="dark"] .spaces-version-trigger, +html[data-theme="dark"] .spaces-version-content { + background-color: var(--ifm-background-color); + border-color: var(--upbound-border-color); + color: var(--ifm-font-color-base); +} +html[data-theme="dark"] .spaces-version-trigger svg, +html[data-theme="dark"] .spaces-version-content svg { + color: var(--ifm-font-color-base); +} +html[data-theme="dark"] .spaces-version-item { + color: var(--ifm-font-color-base); +} +html[data-theme="dark"] .spaces-version-item:focus, +html[data-theme="dark"] .spaces-version-item[data-highlighted] { + background-color: var(--upbound-light-bg); + color: var(--ifm-font-color-base); +} + /* Navbar dropdown popover - theme agnostic */ /* Target navigation menu dropdowns rendered via Radix portal */ /* Dropdown has white background in both modes, so always use dark text */ diff --git a/src/theme/DocSidebar/Desktop/Content/index.js b/src/theme/DocSidebar/Desktop/Content/index.js index fd0bf37e0..0c1ba6cdd 100644 --- a/src/theme/DocSidebar/Desktop/Content/index.js +++ b/src/theme/DocSidebar/Desktop/Content/index.js @@ -54,12 +54,12 @@ export default function DocSidebarDesktopContentWrapper(props) { value={getVersionFromPath(location.pathname)} onValueChange={handleVersionChange} > - + - + {versions.map((v) => ( - + {v.label} ))} diff --git a/src/theme/Root.js b/src/theme/Root.js index b42254b8c..5fba37f52 100644 --- a/src/theme/Root.js +++ b/src/theme/Root.js @@ -24,7 +24,7 @@ export default function Root({ children }) { useEffect(() => { if (!isBrowser) return; - + // Add Scarf pixel const scarfImg = document.createElement('img'); scarfImg.referrerPolicy = 'no-referrer-when-downgrade';