diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..e9ff035b8 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,34 @@ +# Dependabot configuration for monorepo +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + # Go modules for controller + - package-ecosystem: "gomod" + directory: "/controller" + schedule: + interval: weekly + + # Go modules for operator + - package-ecosystem: "gomod" + directory: "/controller/deploy/operator" + schedule: + interval: weekly + + # Python dependencies + - package-ecosystem: "pip" + directory: "/python" + schedule: + interval: weekly + + # Devcontainers + - package-ecosystem: "devcontainers" + directory: "/" + schedule: + interval: weekly + + # GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: weekly diff --git a/python/.github/workflows/backport.yml b/.github/workflows/backport.yaml similarity index 100% rename from python/.github/workflows/backport.yml rename to .github/workflows/backport.yaml diff --git a/.github/workflows/build-images.yaml b/.github/workflows/build-images.yaml new file mode 100644 index 000000000..7dd9b3614 --- /dev/null +++ b/.github/workflows/build-images.yaml @@ -0,0 +1,186 @@ +name: Build and push container images + +on: + workflow_dispatch: + push: + tags: + - '*' + branches: + - main + - 'release-*' + merge_group: + +env: + PUSH: ${{ github.repository_owner == 'jumpstarter-dev' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') || startsWith(github.ref, 'refs/heads/release-')) }} + REGISTRY: quay.io + QUAY_ORG: quay.io/jumpstarter-dev + +jobs: + build-and-push-image: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + attestations: write + id-token: write + strategy: + matrix: + include: + # Controller images + - image_name: jumpstarter-dev/jumpstarter-controller + dockerfile: 
controller/Dockerfile + context: controller + - image_name: jumpstarter-dev/jumpstarter-operator + dockerfile: controller/Dockerfile.operator + context: controller + - image_name: jumpstarter-dev/jumpstarter-operator-bundle + dockerfile: controller/deploy/operator/bundle.Dockerfile + context: controller/deploy/operator + # Python images (use repo root context for .git access needed by hatch-vcs) + - image_name: jumpstarter-dev/jumpstarter + dockerfile: python/Dockerfile + context: . + - image_name: jumpstarter-dev/jumpstarter-utils + dockerfile: python/Dockerfile.utils + context: python + - image_name: jumpstarter-dev/jumpstarter-dev + dockerfile: python/.devfile/Containerfile + context: python + - image_name: jumpstarter-dev/jumpstarter-devspace + dockerfile: python/.devfile/Containerfile.client + context: . + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Get version + run: | + VERSION=$(git describe --tags) + VERSION=${VERSION#v} # remove the leading v prefix for version + echo "VERSION=${VERSION}" >> $GITHUB_ENV + echo "VERSION=${VERSION}" + + # Convert to PEP 440 compliant version for Python packages + # Format: 0.7.0-1051-g54cd2f08 -> 0.7.0.dev1051+g54cd2f08 + if [[ "$VERSION" =~ ^([0-9]+\.[0-9]+\.[0-9]+)-([0-9]+)-g([a-f0-9]+)$ ]]; then + PEP440_VERSION="${BASH_REMATCH[1]}.dev${BASH_REMATCH[2]}+g${BASH_REMATCH[3]}" + else + # If it's already a clean version (e.g., 0.7.0), use as-is + PEP440_VERSION="$VERSION" + fi + echo "PEP440_VERSION=${PEP440_VERSION}" >> $GITHUB_ENV + echo "PEP440_VERSION=${PEP440_VERSION}" + + - name: Set build args + id: build-args + run: | + GIT_COMMIT=$(git rev-parse HEAD) + BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') + echo "git_commit=${GIT_COMMIT}" >> $GITHUB_OUTPUT + echo "build_date=${BUILD_DATE}" >> $GITHUB_OUTPUT + echo "GIT_COMMIT=${GIT_COMMIT}" + echo "BUILD_DATE=${BUILD_DATE}" + + - name: Set image tags + if: ${{ env.PUSH == 'true' }} + id: set-tags + run: | + TAGS="${{ 
env.REGISTRY }}/${{ matrix.image_name }}:${{ env.VERSION }}" + + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + TAGS="$TAGS,${{ env.REGISTRY }}/${{ matrix.image_name }}:latest" + fi + + if [[ "${{ github.ref }}" == refs/heads/release-* ]]; then + RELEASE_BRANCH_NAME=$(basename "${{ github.ref }}") + TAGS="$TAGS,${{ env.REGISTRY }}/${{ matrix.image_name }}:${RELEASE_BRANCH_NAME}" + fi + + echo "tags=$TAGS" >> $GITHUB_OUTPUT + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to the Container registry + uses: docker/login-action@v3 + if: ${{ env.PUSH == 'true' }} + with: + registry: ${{ env.REGISTRY }} + username: jumpstarter-dev+jumpstarter_ci + password: ${{ secrets.QUAY_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ matrix.image_name }} + + - name: Build and push Docker image + id: push + uses: docker/build-push-action@v6 + with: + context: ${{ matrix.context }} + file: ${{ matrix.dockerfile }} + push: ${{ env.PUSH }} + tags: ${{ steps.set-tags.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 + cache-from: type=gha + cache-to: type=gha,mode=max + build-args: | + GIT_VERSION=${{ env.PEP440_VERSION }} + GIT_COMMIT=${{ steps.build-args.outputs.git_commit }} + BUILD_DATE=${{ steps.build-args.outputs.build_date }} + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@v1 + if: ${{ env.PUSH == 'true' }} + with: + subject-name: ${{ env.REGISTRY }}/${{ matrix.image_name }} + subject-digest: ${{ steps.push.outputs.digest }} + push-to-registry: ${{ env.PUSH }} + + publish-helm-charts: + needs: build-and-push-image + if: ${{ github.repository_owner == 'jumpstarter-dev' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') || startsWith(github.ref, 'refs/heads/release-')) 
}} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Get version + run: | + VERSION=$(git describe --tags) + VERSION=${VERSION#v} # remove the leading v prefix for version + echo "VERSION=${VERSION}" >> $GITHUB_ENV + echo "VERSION=${VERSION}" + + - name: Build helm charts + run: | + echo packaging ${VERSION} + # patch the sub-chart app-version, because helm package won't do it + sed -i "s/^appVersion:.*/appVersion: $VERSION/" controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/Chart.yaml + helm package ./controller/deploy/helm/jumpstarter --version "${VERSION}" --app-version "${VERSION}" + + - name: Login helm + env: + PASSWORD: ${{ secrets.QUAY_TOKEN }} + USER: jumpstarter-dev+jumpstarter_ci + run: + helm registry login quay.io -u ${USER} --password-stdin <<< "${PASSWORD}" + + - name: Push helm charts + run: | + helm push jumpstarter-*.tgz oci://${{ env.QUAY_ORG }}/helm + + if [[ "${{ github.ref }}" == refs/heads/release-* ]]; then + RELEASE_BRANCH_NAME=$(basename "${{ github.ref }}") + helm chart save jumpstarter-*.tgz ${{ env.QUAY_ORG }}/helm:${RELEASE_BRANCH_NAME} + helm chart push ${{ env.QUAY_ORG }}/helm:${RELEASE_BRANCH_NAME} + fi diff --git a/python/.github/workflows/build_oci_bundle.yaml b/.github/workflows/build-oci-bundle.yaml similarity index 73% rename from python/.github/workflows/build_oci_bundle.yaml rename to .github/workflows/build-oci-bundle.yaml index d06d14b78..f130f58ad 100644 --- a/python/.github/workflows/build_oci_bundle.yaml +++ b/.github/workflows/build-oci-bundle.yaml @@ -1,4 +1,5 @@ name: Build and push buildroot-based flasher OCI bundle + on: workflow_dispatch: @@ -14,17 +15,17 @@ jobs: - name: Run build_fits.sh run: | - cd packages/jumpstarter-driver-flashers/oci_bundles/aarch64-itb + cd python/packages/jumpstarter-driver-flashers/oci_bundles/aarch64-itb ./build_fits.sh - name: Upload FIT artifacts uses: actions/upload-artifact@v4 with: name: FIT-images - path: 
packages/jumpstarter-driver-flashers/oci_bundles/aarch64-itb/data/*.itb + path: python/packages/jumpstarter-driver-flashers/oci_bundles/aarch64-itb/data/*.itb - name: Run build_bundle.sh for aarch64-itb run: | - cd packages/jumpstarter-driver-flashers/oci_bundles && dnf install -y oras + cd python/packages/jumpstarter-driver-flashers/oci_bundles && dnf install -y oras oras login quay.io -u jumpstarter-dev+jumpstarter_ci --password-stdin <<< "${{ secrets.QUAY_TOKEN }}" ./build_bundle.sh quay.io/jumpstarter-dev/jumpstarter-flasher-aarch64-itb:latest aarch64-itb diff --git a/.github/workflows/controller-bundle.yaml b/.github/workflows/controller-bundle.yaml new file mode 100644 index 000000000..ab9dc5eb6 --- /dev/null +++ b/.github/workflows/controller-bundle.yaml @@ -0,0 +1,97 @@ +name: Check Bundle + +on: + pull_request: + branches: + - main + - 'release-*' + paths: + - 'controller/**' + +jobs: + check-bundle: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + + - name: Cache bin directory (deploy/operator) + uses: actions/cache@v4 + with: + path: controller/deploy/operator/bin/ + key: ${{ runner.os }}-operator-bin-${{ hashFiles('controller/deploy/operator/go.mod') }} + restore-keys: | + ${{ runner.os }}-operator-bin- + + - name: Get version + run: | + if [ "${{ github.event_name }}" == "pull_request" ]; then + BASE_BRANCH="${{ github.base_ref }}" + if [ "$BASE_BRANCH" == "main" ]; then + TAG="latest" + elif [[ "$BASE_BRANCH" =~ ^release- ]]; then + TAG="$BASE_BRANCH" + else + echo "::error::Unknown base branch: $BASE_BRANCH" + exit 1 + fi + else + echo "::error::Unsupported event: ${{ github.event_name }}" + exit 1 + fi + echo "TAG=${TAG}" >> $GITHUB_ENV + echo "TAG=${TAG}" + + - name: Run make bundle + working-directory: controller/deploy/operator + run: | + make bundle 
IMG="quay.io/jumpstarter-dev/jumpstarter-operator:${TAG}" + + - name: Check for uncommitted changes + run: | + DIFF=$(git diff) + if [ -n "$DIFF" ]; then + # Filter out createdAt timestamp lines and context lines, check if any actual changes remain + FILTERED_DIFF=$(echo "$DIFF" | grep -vE '^(---|\+\+\+|@@|index|diff)' | grep -vE '^[+-].*createdAt:.*[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z' || true) + # Check if there are any non-timestamp, non-context changes + if [ -n "$FILTERED_DIFF" ] && [ -n "$(echo "$FILTERED_DIFF" | grep -E '^[+-]' || true)" ]; then + echo "::error::Uncommitted changes detected after running 'make bundle'. Please commit all bundle changes before pushing." + echo "::error::This can be done by running 'make bundle IMG=\"quay.io/jumpstarter-dev/jumpstarter-operator:${TAG}\"'" + git diff + exit 1 + else + echo "Only timestamp changes detected (ignored). Bundle files are up to date." + # Reset the timestamp changes to keep the repo clean + git checkout -- . + fi + else + echo "No uncommitted changes detected. Bundle files are up to date." + fi + + - name: Ensure clean state before build-installer + run: | + # Reset any remaining changes from root + git checkout -- . || true + + - name: Run make build-installer + working-directory: controller/deploy/operator + run: | + make build-installer + + - name: Check for uncommitted changes after build-installer + run: | + if [ -n "$(git diff)" ]; then + echo "::error::Uncommitted changes detected after running 'make build-installer'. Please commit all installer changes before pushing." + echo "::error::This can be done by running 'make build-installer'" + git diff + exit 1 + else + echo "No uncommitted changes detected. Installer files are up to date." 
+ fi diff --git a/.github/workflows/controller-kind.yaml b/.github/workflows/controller-kind.yaml new file mode 100644 index 000000000..fa152b630 --- /dev/null +++ b/.github/workflows/controller-kind.yaml @@ -0,0 +1,35 @@ +name: Kind based CI + +on: + workflow_dispatch: + pull_request: + branches: + - main + - 'release-*' + paths: + - 'controller/**' + +jobs: + deploy-kind: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Run make deploy + working-directory: controller + run: make deploy + + e2e-test-operator: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Run operator e2e test + working-directory: controller + run: make test-operator-e2e diff --git a/.github/workflows/controller-tests.yaml b/.github/workflows/controller-tests.yaml new file mode 100644 index 000000000..ca6a11a19 --- /dev/null +++ b/.github/workflows/controller-tests.yaml @@ -0,0 +1,35 @@ +name: Controller Unit/Functional tests + +on: + workflow_dispatch: + pull_request: + branches: + - main + - 'release-*' + paths: + - 'controller/**' + - 'protocol/**' + +jobs: + tests: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Run controller tests + working-directory: controller + run: make test + + - name: Cache operator bin directory + uses: actions/cache@v4 + with: + path: controller/deploy/operator/bin/ + key: ${{ runner.os }}-operator-bin-${{ hashFiles('controller/deploy/operator/go.mod') }} + restore-keys: | + ${{ runner.os }}-operator-bin- + + - name: Run operator tests + run: make -C controller/deploy/operator test diff --git a/python/.github/workflows/documentation.yaml b/.github/workflows/documentation.yaml similarity index 92% rename from python/.github/workflows/documentation.yaml rename to .github/workflows/documentation.yaml index 75e9758d2..bb7402d0c 
100644 --- a/python/.github/workflows/documentation.yaml +++ b/.github/workflows/documentation.yaml @@ -1,10 +1,16 @@ -name: documentation +name: Documentation on: # Runs on pushes targeting the default branch push: branches: ["main"] + paths: + - 'python/docs/**' + - 'python/packages/**' pull_request: + paths: + - 'python/docs/**' + - 'python/packages/**' merge_group: # Allows you to run this workflow manually from the Actions tab @@ -25,6 +31,7 @@ concurrency: defaults: run: shell: bash + working-directory: python jobs: # Build job @@ -66,7 +73,7 @@ jobs: - name: Upload artifact uses: actions/upload-pages-artifact@v3 with: - path: ./docs/build + path: ./python/docs/build check-warnings: runs-on: ubuntu-latest diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml new file mode 100644 index 000000000..d13d6472e --- /dev/null +++ b/.github/workflows/e2e.yaml @@ -0,0 +1,44 @@ +name: End-to-end tests + +on: + workflow_dispatch: + pull_request: + branches: + - main + - 'release-*' + merge_group: + +permissions: + contents: read + +jobs: + e2e-tests: + if: github.repository_owner == 'jumpstarter-dev' + strategy: + matrix: + os: + - ubuntu-24.04 + - ubuntu-24.04-arm + runs-on: ${{ matrix.os }} + timeout-minutes: 60 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v2 + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: '1.22' + + - name: Setup e2e test environment + run: make e2e-setup + env: + CI: true + + - name: Run e2e tests + run: make e2e-run + env: + CI: true diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 000000000..a08f98cb9 --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,109 @@ +name: Linters + +on: + workflow_dispatch: + push: + branches: + - main + - 'release-*' + pull_request: + branches: + - main + - 'release-*' + merge_group: + +permissions: + contents: read + pull-requests: read + +jobs: + # 
Detect which paths changed to conditionally run linters + changes: + runs-on: ubuntu-latest + outputs: + controller: ${{ steps.filter.outputs.controller }} + helm: ${{ steps.filter.outputs.helm }} + protocol: ${{ steps.filter.outputs.protocol }} + python: ${{ steps.filter.outputs.python }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + controller: + - 'controller/**' + helm: + - 'controller/deploy/helm/**' + protocol: + - 'protocol/**' + python: + - 'python/**' + + lint-go: + needs: changes + if: needs.changes.outputs.controller == 'true' + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + + - name: Run go linter + working-directory: controller + run: make lint + + lint-helm: + needs: changes + if: needs.changes.outputs.helm == 'true' + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Run helm linter + working-directory: controller + run: make lint-helm + + lint-protobuf: + needs: changes + if: needs.changes.outputs.protocol == 'true' + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Run protobuf linter + working-directory: protocol + run: make lint + + lint-python: + needs: changes + if: needs.changes.outputs.python == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Run ruff + uses: astral-sh/ruff-action@84f83ecf9e1e15d26b7984c7ec9cf73d39ffc946 # v3.3.1 + with: + src: './python' + version-file: python/pyproject.toml + + typos: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Run typos + uses: crate-ci/typos@0f0ccba9ed1df83948f0c15026e4f5ccfce46109 # v1.32.0 + with: + config: ./typos.toml diff --git 
a/python/.github/workflows/pr_analytics.yaml b/.github/workflows/pr-analytics.yaml similarity index 59% rename from python/.github/workflows/pr_analytics.yaml rename to .github/workflows/pr-analytics.yaml index 60019b597..401337a1b 100644 --- a/python/.github/workflows/pr_analytics.yaml +++ b/.github/workflows/pr-analytics.yaml @@ -1,4 +1,5 @@ -name: "PR Analytics" +name: PR Analytics + on: workflow_dispatch: inputs: @@ -6,22 +7,23 @@ on: description: "Report date start(d/MM/yyyy)" report_date_end: description: "Report date end(d/MM/yyyy)" + jobs: create-report: - name: "Create report" + name: Create report runs-on: ubuntu-latest permissions: contents: read pull-requests: read issues: write steps: - - name: "Run script for analytics" + - name: Run script for analytics uses: AlexSim93/pull-request-analytics-action@cc57ceb92148c5d5879ca578a2b59f99c3cbe231 # v4.6.1 with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # In the case of a personal access token, it needs to be added to the repository's secrets and used in this field. 
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_REPO_FOR_ISSUE: jumpstarter - GITHUB_OWNER_FOR_ISSUE: jumpstarter-dev - GITHUB_OWNERS_REPOS: jumpstarter-dev/jumpstarter #TODO: check with more repos later, needs PAT: ,jumpstarter-dev/jumpstarter-controller + GITHUB_OWNER_FOR_ISSUE: jumpstarter-dev + GITHUB_OWNERS_REPOS: jumpstarter-dev/jumpstarter USE_CHARTS: true TIMEZONE: "Etc/UTC" REPORT_DATE_START: ${{ inputs.report_date_start }} diff --git a/python/.github/workflows/pytest.yaml b/.github/workflows/python-tests.yaml similarity index 79% rename from python/.github/workflows/pytest.yaml rename to .github/workflows/python-tests.yaml index 81b8ae52e..634b1f73a 100644 --- a/python/.github/workflows/pytest.yaml +++ b/.github/workflows/python-tests.yaml @@ -1,16 +1,24 @@ -name: "Run Tests" +name: Python Tests + on: workflow_dispatch: push: branches: - main - release-* + paths: + - 'python/**' + - 'protocol/**' pull_request: + paths: + - 'python/**' + - 'protocol/**' merge_group: permissions: contents: read pull-requests: read + jobs: pytest-matrix: runs-on: ${{ matrix.runs-on }} @@ -51,27 +59,39 @@ jobs: sudo apt-get update sudo apt-get install -y libgpiod-dev liblgpio-dev + - name: Install sigrok-cli (Linux) + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install -y sigrok-cli + - name: Install Qemu (macOS) if: runner.os == 'macOS' run: | brew install qemu + - name: Install sigrok-cli (macOS) + if: runner.os == 'macOS' + run: | + brew install sigrok-cli + - name: Cache Fedora Cloud images id: cache-fedora-cloud-images uses: actions/cache@v4 with: - path: packages/jumpstarter-driver-qemu/images + path: python/packages/jumpstarter-driver-qemu/images key: fedora-cloud-41-1.4 - name: Download Fedora Cloud images if: steps.cache-fedora-cloud-images.outputs.cache-hit != 'true' run: | for arch in aarch64 x86_64; do - curl -L --output "packages/jumpstarter-driver-qemu/images/Fedora-Cloud-Base-Generic-41-1.4.${arch}.qcow2" \ + curl -L --output 
"python/packages/jumpstarter-driver-qemu/images/Fedora-Cloud-Base-Generic-41-1.4.${arch}.qcow2" \ "https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/${arch}/images/Fedora-Cloud-Base-Generic-41-1.4.${arch}.qcow2" done - name: Run pytest + working-directory: python run: | make test diff --git a/python/.github/workflows/trigger-packages-index.yaml b/.github/workflows/trigger-packages.yaml similarity index 100% rename from python/.github/workflows/trigger-packages-index.yaml rename to .github/workflows/trigger-packages.yaml diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..a746eb15e --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +# E2E test artifacts and local configuration +.e2e-setup-complete +.e2e/ +.bats/ +ca.pem +ca-key.pem +ca.csr +server.pem +server-key.pem +server.csr + +# Python +.venv/ +__pycache__/ +*.pyc +*.pyo +*.egg-info/ +dist/ +build/ + +# Editor/IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ +.DS_Store diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..a53d5ed3d --- /dev/null +++ b/Makefile @@ -0,0 +1,178 @@ +# Jumpstarter Monorepo Makefile +# +# This Makefile provides common targets that delegate to subdirectory Makefiles. 
+# + +# Subdirectories containing projects +SUBDIRS := python protocol controller e2e + +# Default target +.PHONY: all +all: build + +# Help target - shows available commands +.PHONY: help +help: + @echo "Jumpstarter Monorepo" + @echo "" + @echo "Available targets:" + @echo " make all - Build all projects (default)" + @echo " make build - Build all projects" + @echo " make test - Run tests in all projects" + @echo " make clean - Clean build artifacts in all projects" + @echo " make lint - Run linters in all projects" + @echo " make fmt - Format code in all projects" + @echo "" + @echo "End-to-end testing:" + @echo " make e2e-setup - Setup e2e test environment (one-time)" + @echo " make e2e-run - Run e2e tests (requires e2e-setup first)" + @echo " make e2e - Same as e2e-run" + @echo " make e2e-full - Full setup + run (for CI or first time)" + @echo " make e2e-clean - Clean up e2e test environment (delete cluster, certs, etc.)" + @echo "" + @echo "Per-project targets:" + @echo " make build- - Build specific project" + @echo " make test- - Test specific project" + @echo " make clean- - Clean specific project" + @echo "" + @echo "Projects: $(SUBDIRS)" + +# Build all projects +.PHONY: build +build: + @for dir in $(SUBDIRS); do \ + if [ -f $$dir/Makefile ]; then \ + echo "Building $$dir..."; \ + $(MAKE) -C $$dir build || true; \ + fi \ + done + +# Test all projects +.PHONY: test +test: + @for dir in $(SUBDIRS); do \ + if [ -f $$dir/Makefile ]; then \ + echo "Testing $$dir..."; \ + $(MAKE) -C $$dir test ; \ + fi \ + done + +# Clean all projects +.PHONY: clean +clean: + @for dir in $(SUBDIRS); do \ + if [ -f $$dir/Makefile ]; then \ + echo "Cleaning $$dir..."; \ + $(MAKE) -C $$dir clean || true; \ + fi \ + done + +# Lint all projects +.PHONY: lint +lint: + @for dir in $(SUBDIRS); do \ + if [ -f $$dir/Makefile ]; then \ + echo "Linting $$dir..."; \ + $(MAKE) -C $$dir lint; \ + fi \ + done + +# Format all projects +.PHONY: fmt +fmt: + @for dir in $(SUBDIRS); do \ + if [ -f 
$$dir/Makefile ]; then \ + echo "Formatting $$dir..."; \ + $(MAKE) -C $$dir fmt || true; \ + fi \ + done + +# Per-project build targets +.PHONY: build-python build-protocol build-controller build-e2e +build-python: + @if [ -f python/Makefile ]; then $(MAKE) -C python build; fi + +build-protocol: + @if [ -f protocol/Makefile ]; then $(MAKE) -C protocol build; fi + +build-controller: + @if [ -f controller/Makefile ]; then $(MAKE) -C controller build; fi + +build-e2e: + @if [ -f e2e/Makefile ]; then $(MAKE) -C e2e build; fi + +# Per-project test targets +.PHONY: test-python test-protocol test-controller test-e2e +test-python: + @if [ -f python/Makefile ]; then $(MAKE) -C python test; fi + +test-protocol: + @if [ -f protocol/Makefile ]; then $(MAKE) -C protocol test; fi + +test-controller: + @if [ -f controller/Makefile ]; then $(MAKE) -C controller test; fi + +# Setup e2e testing environment (one-time) +.PHONY: e2e-setup +e2e-setup: + @echo "Setting up e2e test environment..." + @bash e2e/setup-e2e.sh + +# Run e2e tests +.PHONY: e2e-run +e2e-run: + @echo "Running e2e tests..." + @bash e2e/run-e2e.sh + +# Convenience alias for running e2e tests +.PHONY: e2e +e2e: e2e-run + +# Full e2e setup + run +.PHONY: e2e-full +e2e-full: + @bash e2e/run-e2e.sh --full + +# Clean up e2e test environment +.PHONY: e2e-clean +e2e-clean: + @echo "Cleaning up e2e test environment..." + @if command -v kind >/dev/null 2>&1; then \ + echo "Deleting jumpstarter kind cluster..."; \ + kind delete cluster --name jumpstarter 2>/dev/null || true; \ + fi + @echo "Removing certificates and setup files..." + @rm -f ca.pem ca-key.pem ca.csr server.pem server-key.pem server.csr + @rm -f .e2e-setup-complete + @echo "Removing local e2e configuration directory..." + @rm -rf .e2e + @echo "Removing virtual environment..." + @rm -rf .venv + @echo "Removing local bats libraries..." 
+ @rm -rf .bats + @if [ -d /etc/jumpstarter/exporters ] && [ -w /etc/jumpstarter/exporters ]; then \ + echo "Removing exporter configs..."; \ + rm -rf /etc/jumpstarter/exporters/* 2>/dev/null || true; \ + fi + @echo "✓ E2E test environment cleaned" + @echo "" + @echo "Note: You may need to manually remove the dex entry from /etc/hosts:" + @echo " sudo sed -i.bak '/dex.dex.svc.cluster.local/d' /etc/hosts" + +# Backward compatibility alias +.PHONY: test-e2e +test-e2e: e2e-run + +# Per-project clean targets +.PHONY: clean-python clean-protocol clean-controller clean-e2e +clean-python: + @if [ -f python/Makefile ]; then $(MAKE) -C python clean; fi + +clean-protocol: + @if [ -f protocol/Makefile ]; then $(MAKE) -C protocol clean; fi + +clean-controller: + @if [ -f controller/Makefile ]; then $(MAKE) -C controller clean; fi + +clean-e2e: + @if [ -f e2e/Makefile ]; then $(MAKE) -C e2e clean; fi diff --git a/README.md b/README.md new file mode 100644 index 000000000..f50fba910 --- /dev/null +++ b/README.md @@ -0,0 +1,172 @@ +# ![bolt](python/assets/bolt.svg) Jumpstarter + +[![Matrix](https://img.shields.io/matrix/jumpstarter%3Amatrix.org?color=blue)](https://matrix.to/#/#jumpstarter:matrix.org) +[![Etherpad](https://img.shields.io/badge/Etherpad-Notes-blue?logo=etherpad)](https://etherpad.jumpstarter.dev/pad-lister) +[![Community Meeting](https://img.shields.io/badge/Weekly%20Meeting-Google%20Meet-blue?logo=google-meet)](https://meet.google.com/gzd-hhbd-hpu) +![GitHub Release](https://img.shields.io/github/v/release/jumpstarter-dev/jumpstarter) +![PyPI - Version](https://img.shields.io/pypi/v/jumpstarter) +[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/jumpstarter-dev/jumpstarter) + +A free, open source tool for automated testing on real and virtual hardware with +CI/CD integration. Simplify device automation with consistent rules across local +and distributed environments. 
+ +## Highlights + +- 🧪 **Unified Testing** - One tool for local, virtual, and remote hardware +- 🐍 **Python-Powered** - Leverage Python's testing ecosystem +- 🔌 **Hardware Abstraction** - Simplify complex hardware interfaces with drivers +- 🌐 **Collaborative** - Share test hardware globally +- ⚙️ **CI/CD Ready** - Works with cloud native developer environments and pipelines +- 💻 **Cross-Platform** - Supports Linux and macOS + +## Repository Structure + +This monorepo contains all Jumpstarter components: + +| Directory | Description | +|-----------|-------------| +| [`python/`](python/) | Python client, CLI, drivers, and testing framework | +| [`controller/`](controller/) | Kubernetes controller and operator (Jumpstarter Service) | +| [`protocol/`](protocol/) | gRPC protocol definitions (protobuf) | +| [`e2e/`](e2e/) | End-to-end testing infrastructure | + +## Quick Start + +### Install the CLI + +```shell +pip install --extra-index-url https://pkg.jumpstarter.dev/ jumpstarter-cli +``` + +Or install all Python components: + +```shell +pip install --extra-index-url https://pkg.jumpstarter.dev/ jumpstarter-all +``` + +### Deploy the Service + +To install the Jumpstarter Service in your Kubernetes cluster, see the +[Service Installation](https://jumpstarter.dev/main/getting-started/installation/index.html) +documentation. + +## Components + +### Python Client & Drivers (`python/`) + +The Python implementation provides: +- `jmp` CLI tool for interacting with hardware +- Client libraries for test automation +- Hardware drivers for various devices +- Testing framework integration + +See [`python/README.md`](python/README.md) for details. 
+ +### Jumpstarter Service (`controller/`) + +The Kubernetes-native service that provides: +- Centralized hardware management +- Client and exporter routing +- Authentication and authorization +- Multi-tenant support + +**Prerequisites:** +- Kubernetes v1.11.3+ +- kubectl v1.11.3+ + +See [`controller/README.md`](controller/README.md) for deployment instructions. + +### Protocol (`protocol/`) + +The gRPC-based communication layer that enables: +- Unified interface for virtual and physical hardware +- Secure communication over HTTPS +- Tunneling support for Unix sockets, TCP, and UDP +- Flexible topology with direct or routed connections + +See [`protocol/README.md`](protocol/README.md) for details. + +### End-to-End Tests (`e2e/`) + +Comprehensive testing infrastructure for the entire Jumpstarter stack: +- `setup-e2e.sh` - One-time environment setup (auto-installs bats libraries on macOS) +- `run-e2e.sh` - Quick test runner for iterations +- `action.yml` - GitHub Actions composite action for CI/CD +- Full integration tests covering authentication, exporters, and clients + +Run e2e tests locally: +```shell +# First time setup +make e2e-setup + +# Run tests (repeat as needed) +make e2e # or: make e2e-run + +# Or full setup + run in one command +make e2e-full + +# Clean up e2e environment (delete cluster, certs, etc.) 
+make e2e-clean +``` + +## Development + +### Prerequisites + +- Python 3.11+ (for Python components) +- Go 1.22+ (for controller) +- Docker/Podman (for container builds) +- kubectl (for Kubernetes deployment) + +### Building + +```shell +# Build all components +make all + +# Build specific components +make python # Python packages +make controller # Controller binary +make protocol # Generate protocol code + +# Run tests +make test + +# Run end-to-end tests +make e2e-setup # First time only +make e2e # Run tests +make e2e-clean # Clean up +``` + +### Running Locally + +```shell +# Start a local development environment +make dev +``` + +## Documentation + +Jumpstarter's documentation is available at [jumpstarter.dev](https://jumpstarter.dev). + +- [Getting Started](https://jumpstarter.dev/main/getting-started/) +- [User Guide](https://jumpstarter.dev/main/introduction/) +- [API Reference](https://jumpstarter.dev/main/api/) +- [Contributing Guide](https://jumpstarter.dev/main/contributing.html) + +## Contributing + +Jumpstarter welcomes contributors of all levels of experience! See the +[contributing guide](https://jumpstarter.dev/main/contributing.html) to get started. + +### Community + +- [Matrix Chat](https://matrix.to/#/#jumpstarter:matrix.org) +- [Weekly Meeting](https://meet.google.com/gzd-hhbd-hpu) +- [Meeting Notes](https://etherpad.jumpstarter.dev/pad-lister) + +## License + +Jumpstarter is licensed under the Apache 2.0 License ([LICENSE](LICENSE) or +[https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)). diff --git a/controller/.dockerignore b/controller/.dockerignore new file mode 100644 index 000000000..a3aab7af7 --- /dev/null +++ b/controller/.dockerignore @@ -0,0 +1,3 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore build and test binaries. 
+bin/ diff --git a/controller/.gitignore b/controller/.gitignore new file mode 100644 index 000000000..8321461d2 --- /dev/null +++ b/controller/.gitignore @@ -0,0 +1,29 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin/* +Dockerfile.cross + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Go workspace file +go.work + +# Kubernetes Generated files - skip generated files, except for vendored files +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +.vscode +*.swp +*.swo +*~ + +goreleaser/ diff --git a/controller/.golangci.yml b/controller/.golangci.yml new file mode 100644 index 000000000..9673c2e5f --- /dev/null +++ b/controller/.golangci.yml @@ -0,0 +1,48 @@ +version: "2" +run: + allow-parallel-runners: true +linters: + default: none + enable: + - copyloopvar + - dupl + - errcheck + - ginkgolinter + - goconst + - gocyclo + - govet + - ineffassign + - lll + - misspell + - nakedret + - staticcheck + - unconvert + - unparam + - unused + exclusions: + generated: lax + rules: + - linters: + - lll + path: api/* + - linters: + - dupl + path: api/.*_test\.go + - linters: + - dupl + - lll + path: internal/* + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/controller/.goreleaser.yaml b/controller/.goreleaser.yaml new file mode 100644 index 000000000..36be500b7 --- /dev/null +++ b/controller/.goreleaser.yaml @@ -0,0 +1,23 @@ +# vim: set ts=2 sw=2 tw=0 fo=cnqoj + +version: 2 + +dist: goreleaser + +before: + hooks: + - go mod tidy + +builds: + - env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + id: jmpctl + main: ./cmd/jmpctl + binary: jmpctl + +archives: + - id: jmpctl + format: binary diff --git a/controller/.ko.yaml b/controller/.ko.yaml new file mode 100644 index 000000000..0b1e60bf1 --- 
/dev/null +++ b/controller/.ko.yaml @@ -0,0 +1,4 @@ +defaultBaseImage: gcr.io/distroless/static:nonroot +defaultPlatforms: + - linux/arm64 + - linux/amd64 diff --git a/controller/Dockerfile b/controller/Dockerfile new file mode 100644 index 000000000..106652d46 --- /dev/null +++ b/controller/Dockerfile @@ -0,0 +1,48 @@ +# Build the manager binary +FROM registry.access.redhat.com/ubi9/go-toolset:1.24.6 AS builder +ARG TARGETOS +ARG TARGETARCH +ARG GIT_VERSION=unknown +ARG GIT_COMMIT=unknown +ARG BUILD_DATE=unknown + +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +# Cache module downloads across builds +RUN --mount=type=cache,target=/opt/app-root/src/go/pkg/mod,sharing=locked,uid=1001,gid=0 \ + --mount=type=cache,target=/opt/app-root/src/.cache/go-build,sharing=locked,uid=1001,gid=0 \ + go mod download + +# Copy the go source +COPY cmd/ cmd/ +COPY api/ api/ +COPY internal/ internal/ + +# Build +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. 
+RUN --mount=type=cache,target=/opt/app-root/src/go/pkg/mod,sharing=locked,uid=1001,gid=0 \ +--mount=type=cache,target=/opt/app-root/src/.cache/go-build,sharing=locked,uid=1001,gid=0 \ + CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} \ + go build -a \ + -ldflags "-X main.version=${GIT_VERSION} -X main.gitCommit=${GIT_COMMIT} -X main.buildDate=${BUILD_DATE}" \ + -o manager cmd/main.go +RUN --mount=type=cache,target=/opt/app-root/src/go/pkg/mod,sharing=locked,uid=1001,gid=0 \ + --mount=type=cache,target=/opt/app-root/src/.cache/go-build,sharing=locked,uid=1001,gid=0 \ + CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} \ + go build -a \ + -ldflags "-X main.version=${GIT_VERSION} -X main.gitCommit=${GIT_COMMIT} -X main.buildDate=${BUILD_DATE}" \ + -o router cmd/router/main.go + +FROM registry.access.redhat.com/ubi9/ubi-micro:9.5 +WORKDIR / +COPY --from=builder /opt/app-root/src/manager . +COPY --from=builder /opt/app-root/src/router . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/controller/Dockerfile.operator b/controller/Dockerfile.operator new file mode 100644 index 000000000..ccbd42a84 --- /dev/null +++ b/controller/Dockerfile.operator @@ -0,0 +1,47 @@ +# Build the manager binary +FROM registry.access.redhat.com/ubi9/go-toolset:1.24.6 AS builder +ARG TARGETOS +ARG TARGETARCH +ARG GIT_VERSION=unknown +ARG GIT_COMMIT=unknown +ARG BUILD_DATE=unknown + +# Copy the Go Modules manifests +COPY --chown=1001:0 deploy/operator/go.mod deploy/operator/go.mod +COPY --chown=1001:0 deploy/operator/go.sum deploy/operator/go.sum +COPY --chown=1001:0 go.mod go.mod +COPY --chown=1001:0 go.sum go.sum + + +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN --mount=type=cache,target=/opt/app-root/src/go/pkg/mod,sharing=locked,uid=1001,gid=0 \ + --mount=type=cache,target=/opt/app-root/src/.cache/go-build,sharing=locked,uid=1001,gid=0 \ + cd 
deploy/operator && go mod download
+
+# Copy the base jumpstarter-controller internal/config parts
+COPY --chown=1001:0 internal/ internal/
+COPY --chown=1001:0 api/ api/
+# Copy the go source
+COPY --chown=1001:0 deploy/operator/cmd/ deploy/operator/cmd/
+COPY --chown=1001:0 deploy/operator/api/ deploy/operator/api/
+COPY --chown=1001:0 deploy/operator/internal/ deploy/operator/internal/
+
+# Build
+# the GOARCH has not a default value to allow the binary be built according to the host where the command
+# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO
+# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore,
+# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
+# NOTE: the CGO_ENABLED/GOOS/GOARCH assignments must prefix `go build` itself; prefixing them to
+# `cd` would scope them to the `cd` command only and they would NOT propagate past the `&&`.
+RUN --mount=type=cache,target=/opt/app-root/src/go/pkg/mod,sharing=locked,uid=1001,gid=0 \
+    --mount=type=cache,target=/opt/app-root/src/.cache/go-build,sharing=locked,uid=1001,gid=0 \
+    cd deploy/operator && \
+    CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a \
+    -ldflags "-X main.version=${GIT_VERSION} -X main.gitCommit=${GIT_COMMIT} -X main.buildDate=${BUILD_DATE}" \
+    -o manager cmd/main.go
+
+FROM registry.access.redhat.com/ubi9/ubi-micro:9.5
+WORKDIR /
+COPY --from=builder /opt/app-root/src/deploy/operator/manager .
+USER 65532:65532
+
+ENTRYPOINT ["/manager"]
\ No newline at end of file
diff --git a/controller/Makefile b/controller/Makefile
new file mode 100644
index 000000000..16d4e1ce3
--- /dev/null
+++ b/controller/Makefile
@@ -0,0 +1,291 @@
+# All Go source files, excluding build output (bin/) and packaging/.
+# The pattern must be "*.go" (a bare ".go" matches nothing), and the prune
+# pattern must be "./bin/*" so files *inside* bin/ are excluded too.
+GO_FILES := $(shell find ./ -name "*.go" -not -path "./bin/*" -not -path "./packaging/*")
+
+# Image URL to use all building/pushing image targets
+IMG ?= quay.io/jumpstarter-dev/jumpstarter-controller:latest
+DOCKER_REPO = $(shell echo $(IMG) | cut -d: -f1)
+DOCKER_TAG = $(shell echo $(IMG) | cut -d: -f2)
+# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
+ENVTEST_K8S_VERSION = 1.30.0
+
+# Version information
+GIT_VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "unknown")
+GIT_COMMIT := $(shell git rev-parse HEAD 2>/dev/null || echo "unknown")
+BUILD_DATE := $(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
+
+# LDFLAGS for version information
+LDFLAGS := -X main.version=$(GIT_VERSION) \
+           -X main.gitCommit=$(GIT_COMMIT) \
+           -X main.buildDate=$(BUILD_DATE)
+
+# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
+ifeq (,$(shell go env GOBIN))
+GOBIN=$(shell go env GOPATH)/bin
+else
+GOBIN=$(shell go env GOBIN)
+endif
+
+# CONTAINER_TOOL defines the container tool to be used for building images.
+# Be aware that the target commands are only tested with Docker which is
+# scaffolded by default. However, you might want to replace it to use other
+# tools. (i.e. podman)
+CONTAINER_TOOL ?= podman
+
+# Setting SHELL to bash allows bash commands to be executed by recipes.
+# Options are set to exit when a recipe line exits non-zero or a piped command fails.
+SHELL = /usr/bin/env bash -o pipefail
+.SHELLFLAGS = -ec
+
+.PHONY: all
+all: build
+
+##@ General
+
+# The help target prints out all targets with their descriptions organized
+# beneath their categories. The categories are represented by '##@' and the
+# target descriptions by '##'.
The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=jumpstarter-manager-role crd webhook paths="./api/..." paths="./internal/..." \ + output:crd:artifacts:config=deploy/helm/jumpstarter/crds/ \ + output:rbac:artifacts:config=deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/ + + cp deploy/helm/jumpstarter/crds/* deploy/operator/config/crd/bases/ + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./api/..." paths="./internal/..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + +# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors. 
+.PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up. +test-e2e: + go test ./test/e2e/ -v -ginkgo.v + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter + $(GOLANGCI_LINT) run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes + $(GOLANGCI_LINT) run --fix + +##@ Build +.PHONY: build-operator +build-operator: + make -C deploy/operator build-installer docker-build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -ldflags "$(LDFLAGS)" -o bin/manager cmd/main.go + go build -ldflags "$(LDFLAGS)" -o bin/router cmd/router/main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd/main.go + +.PHONY: run-router +run-router: manifests generate fmt vet ## Run a router from your host. + go run ./cmd/router/main.go + +# If you wish to build the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + $(CONTAINER_TOOL) build \ + --build-arg GIT_VERSION=$(GIT_VERSION) \ + --build-arg GIT_COMMIT=$(GIT_COMMIT) \ + --build-arg BUILD_DATE=$(BUILD_DATE) \ + -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + $(CONTAINER_TOOL) push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. 
if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name jumpstarter-controller-builder + $(CONTAINER_TOOL) buildx use jumpstarter-controller-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) \ + --build-arg GIT_VERSION=$(GIT_VERSION) \ + --build-arg GIT_COMMIT=$(GIT_COMMIT) \ + --build-arg BUILD_DATE=$(BUILD_DATE) \ + --tag ${DOCKER_REPO}:${DOCKER_TAG} \ + --tag ${DOCKER_REPO}:latest \ + -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx rm jumpstarter-controller-builder + rm Dockerfile.cross + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default > dist/install.yaml + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 
+ $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +.PHONY: deploy +deploy: docker-build cluster grpcurl + ./hack/deploy_with_helm.sh + +.PHONY: deploy-with-operator +deploy-with-operator: docker-build build-operator cluster grpcurl + ./hack/deploy_with_operator.sh + +.PHONY: deploy-operator +deploy-operator: docker-build build-operator cluster grpcurl + NETWORKING_MODE=ingress DEPLOY_JUMPSTARTER=false ./hack/deploy_with_operator.sh + +.PHONY: test-operator-e2e +test-operator-e2e: grpcurl deploy-operator + make -C deploy/operator test-e2e +.PHONY: operator-logs +operator-logs: + kubectl logs -n jumpstarter-operator-system -l app.kubernetes.io/name=jumpstarter-operator -f + +.PHONY: deploy-with-operator-parallel +deploy-with-operator-parallel: + make deploy-with-operator -j5 --output-sync=target + +.PHONY: deploy-exporters +deploy-exporters: + ./hack/demoenv/prepare_exporters.sh + +.PHONY: lint-helm +lint-helm: + helm lint deploy/helm/jumpstarter + + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUBECTL ?= kubectl +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint +KIND = $(LOCALBIN)/kind +GRPCURL = $(LOCALBIN)/grpcurl + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.4.1 +CONTROLLER_TOOLS_VERSION ?= v0.16.3 +ENVTEST_VERSION ?= release-0.18 +GOLANGCI_LINT_VERSION ?= v2.5.0 +KIND_VERSION ?= v0.27.0 +GRPCURL_VERSION ?= v1.9.2 + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. 
+$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: protobuf-gen +protobuf-gen: + podman run --volume "$(shell pwd):/workspace" --workdir /workspace docker.io/bufbuild/buf:latest generate + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +.PHONY: grpcurl +grpcurl: $(GRPCURL) ## Download grpcurl locally if necessary. 
+$(GRPCURL): $(LOCALBIN) + $(call go-install-tool,$(GRPCURL),github.com/fullstorydev/grpcurl/cmd/grpcurl,$(GRPCURL_VERSION)) + +.PHONY: kind +kind: $(KIND) +$(KIND): $(LOCALBIN) + $(call go-install-tool,$(KIND),sigs.k8s.io/kind,$(KIND_VERSION)) + +.PHONY: cluster +cluster: $(KIND) + $(KIND) get clusters | grep jumpstarter || $(KIND) create cluster --name jumpstarter --config hack/kind_cluster.yaml + + +clean: $(KIND) + $(KIND) delete cluster --name jumpstarter + + + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f "$(1)-$(3)" ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +rm -f $(1) || true ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv $(1) $(1)-$(3) ;\ +} ;\ +ln -sf $(1)-$(3) $(1) +endef diff --git a/controller/PROJECT b/controller/PROJECT new file mode 100644 index 000000000..fce14faea --- /dev/null +++ b/controller/PROJECT @@ -0,0 +1,38 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html +domain: jumpstarter.dev +layout: +- go.kubebuilder.io/v4 +projectName: jumpstarter-controller +repo: github.com/jumpstarter-dev/jumpstarter-controller +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: jumpstarter.dev + kind: Exporter + path: github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: jumpstarter.dev + kind: Identity + path: github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1 + version: v1alpha1 +- controller: true + domain: jumpstarter.dev + kind: Lease + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: jumpstarter.dev + kind: ExporterAccessPolicy + path: github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1 + version: v1alpha1 +version: "3" diff --git a/controller/README.md b/controller/README.md new file mode 100644 index 000000000..121715874 --- /dev/null +++ b/controller/README.md @@ -0,0 +1,119 @@ +# jumpstarter-controller + +[![Build and push container image](https://github.com/jumpstarter-dev/jumpstarter-controller/actions/workflows/build.yaml/badge.svg)](https://github.com/jumpstarter-dev/jumpstarter-controller/actions/workflows/build.yaml) +![GitHub Release](https://img.shields.io/github/v/release/jumpstarter-dev/jumpstarter-controller) +![GitHub Downloads (all assets, all releases)](https://img.shields.io/github/downloads/jumpstarter-dev/jumpstarter-controller/total) +[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/jumpstarter-dev/jumpstarter-controller) + +// TODO(user): Add simple overview of use/purpose + +## Description +// TODO(user): An in-depth paragraph about your project and overview of use + +## Getting Started + +### Prerequisites +- go version v1.22.0+ +- kubectl version v1.11.3+. +- Access to a Kubernetes v1.11.3+ cluster. 
+
+### To Deploy on the cluster
+**Build and push your image to the location specified by `IMG`:**
+
+```sh
+make docker-push IMG=<some-registry>/jumpstarter-controller:tag
+```
+
+**NOTE:** This image ought to be published in the personal registry you specified.
+And it is required to have access to pull the image from the working environment.
+Make sure you have the proper permission to the registry if the above commands don’t work.
+
+**Install the CRDs into the cluster:**
+
+```sh
+make install
+```
+
+**Deploy the Manager to the cluster with the image specified by `IMG`:**
+
+```sh
+make deploy IMG=<some-registry>/jumpstarter-router:tag
+```
+
+> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin
+privileges or be logged in as admin.
+
+**Create instances of your solution**
+You can apply the samples (examples) from the config/samples:
+
+```sh
+kubectl apply -k config/samples/
+```
+
+>**NOTE**: Ensure that the samples have default values to test it out.
+
+### To Uninstall
+**Delete the instances (CRs) from the cluster:**
+
+```sh
+kubectl delete -k config/samples/
+```
+
+**Delete the APIs(CRDs) from the cluster:**
+
+```sh
+make uninstall
+```
+
+**UnDeploy the controller from the cluster:**
+
+```sh
+make undeploy
+```
+
+## Project Distribution
+
+Following are the steps to build the installer and distribute this project to users.
+
+1. Build the installer for the image built and published in the registry:
+
+```sh
+make build-installer IMG=<some-registry>/jumpstarter-router:tag
+```
+
+NOTE: The makefile target mentioned above generates an 'install.yaml'
+file in the dist directory. This file contains all the resources built
+with Kustomize, which are necessary to install this project without
+its dependencies.
+
+2.
Using the installer + +Users can just run kubectl apply -f to install the project, i.e.: + +```sh +kubectl apply -f https://raw.githubusercontent.com//jumpstarter-router//dist/install.yaml +``` + +## Contributing +// TODO(user): Add detailed information on how you would like others to contribute to this project + +**NOTE:** Run `make help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +## License + +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/controller/api/v1alpha1/authenticationconfiguration_types.go b/controller/api/v1alpha1/authenticationconfiguration_types.go new file mode 100644 index 000000000..3ad2c09bf --- /dev/null +++ b/controller/api/v1alpha1/authenticationconfiguration_types.go @@ -0,0 +1,24 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticationConfiguration provides versioned configuration for authentication. 
+type AuthenticationConfiguration struct { + metav1.TypeMeta + + Internal Internal `json:"internal"` + JWT []apiserverv1beta1.JWTAuthenticator `json:"jwt"` +} + +type Internal struct { + Prefix string `json:"prefix"` +} + +func init() { + SchemeBuilder.Register(&AuthenticationConfiguration{}) +} diff --git a/controller/api/v1alpha1/client_helpers.go b/controller/api/v1alpha1/client_helpers.go new file mode 100644 index 000000000..7c425354e --- /dev/null +++ b/controller/api/v1alpha1/client_helpers.go @@ -0,0 +1,18 @@ +package v1alpha1 + +import "strings" + +func (c *Client) InternalSubject() string { + namespace, uid := getNamespaceAndUID(c.Namespace, c.UID, c.Annotations) + return strings.Join([]string{"client", namespace, c.Name, uid}, ":") +} + +func (c *Client) Usernames(prefix string) []string { + usernames := []string{prefix + c.InternalSubject()} + + if c.Spec.Username != nil { + usernames = append(usernames, *c.Spec.Username) + } + + return usernames +} diff --git a/controller/api/v1alpha1/client_helpers_test.go b/controller/api/v1alpha1/client_helpers_test.go new file mode 100644 index 000000000..aa3301a25 --- /dev/null +++ b/controller/api/v1alpha1/client_helpers_test.go @@ -0,0 +1,84 @@ +package v1alpha1 + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestClient_InternalSubject(t *testing.T) { + t.Run("without annotations", func(t *testing.T) { + c := &Client{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-client", + Namespace: "default", + UID: types.UID("123e4567-e89b-12d3-a456-426614174000"), + }, + } + expected := "client:default:my-client:123e4567-e89b-12d3-a456-426614174000" + if got := c.InternalSubject(); got != expected { + t.Errorf("got %v, want %v", got, expected) + } + }) + + t.Run("with both migrated annotations", func(t *testing.T) { + c := &Client{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-client", + Namespace: "default", + UID: 
types.UID("123e4567-e89b-12d3-a456-426614174000"), + Annotations: map[string]string{ + AnnotationMigratedNamespace: "old-namespace", + AnnotationMigratedUID: "old-uid-value", + }, + }, + } + expected := "client:old-namespace:my-client:old-uid-value" + if got := c.InternalSubject(); got != expected { + t.Errorf("got %v, want %v", got, expected) + } + }) + + t.Run("empty annotation values are ignored", func(t *testing.T) { + c := &Client{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-client", + Namespace: "default", + UID: types.UID("123e4567-e89b-12d3-a456-426614174000"), + Annotations: map[string]string{ + AnnotationMigratedNamespace: "", + AnnotationMigratedUID: "", + }, + }, + } + expected := "client:default:my-client:123e4567-e89b-12d3-a456-426614174000" + if got := c.InternalSubject(); got != expected { + t.Errorf("got %v, want %v", got, expected) + } + }) +} + +func TestClient_Usernames(t *testing.T) { + t.Run("without custom username", func(t *testing.T) { + c := &Client{ + ObjectMeta: metav1.ObjectMeta{Name: "my-client", Namespace: "default", UID: types.UID("123")}, + Spec: ClientSpec{}, + } + got := c.Usernames("internal:") + if len(got) != 1 || got[0] != "internal:client:default:my-client:123" { + t.Errorf("got %v, want single internal subject", got) + } + }) + + t.Run("with custom username", func(t *testing.T) { + c := &Client{ + ObjectMeta: metav1.ObjectMeta{Name: "my-client", Namespace: "default", UID: types.UID("123")}, + Spec: ClientSpec{Username: stringPtr("custom-user")}, + } + got := c.Usernames("internal:") + if len(got) != 2 || got[1] != "custom-user" { + t.Errorf("got %v, want internal subject and custom username", got) + } + }) +} diff --git a/controller/api/v1alpha1/client_types.go b/controller/api/v1alpha1/client_types.go new file mode 100644 index 000000000..73a3d6a01 --- /dev/null +++ b/controller/api/v1alpha1/client_types.go @@ -0,0 +1,62 @@ +/* +Copyright 2024. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// ClientSpec defines the desired state of Identity +type ClientSpec struct { + Username *string `json:"username,omitempty"` +} + +// ClientStatus defines the observed state of Identity +type ClientStatus struct { + // Status field for the clients + Credential *corev1.LocalObjectReference `json:"credential,omitempty"` + Endpoint string `json:"endpoint,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Client is the Schema for the identities API +type Client struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClientSpec `json:"spec,omitempty"` + Status ClientStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClientList contains a list of Identity +type ClientList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Client `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Client{}, &ClientList{}) +} diff --git a/controller/api/v1alpha1/common_helpers.go b/controller/api/v1alpha1/common_helpers.go new file mode 100644 index 000000000..bda44ee39 --- /dev/null +++ 
b/controller/api/v1alpha1/common_helpers.go @@ -0,0 +1,21 @@ +package v1alpha1 + +import "k8s.io/apimachinery/pkg/types" + +// getNamespaceAndUID returns the namespace and UID for an object, applying migration +// annotation overrides if present. +func getNamespaceAndUID(namespace string, uid types.UID, annotations map[string]string) (string, string) { + resultNamespace := namespace + resultUID := string(uid) + + if annotations != nil { + if migratedNamespace, ok := annotations[AnnotationMigratedNamespace]; ok && migratedNamespace != "" { + resultNamespace = migratedNamespace + } + if migratedUID, ok := annotations[AnnotationMigratedUID]; ok && migratedUID != "" { + resultUID = migratedUID + } + } + + return resultNamespace, resultUID +} diff --git a/controller/api/v1alpha1/common_helpers_test.go b/controller/api/v1alpha1/common_helpers_test.go new file mode 100644 index 000000000..26aa8701a --- /dev/null +++ b/controller/api/v1alpha1/common_helpers_test.go @@ -0,0 +1,122 @@ +package v1alpha1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/types" +) + +func TestGetNamespaceAndUID(t *testing.T) { + tests := []struct { + name string + namespace string + uid types.UID + annotations map[string]string + expectedNamespace string + expectedUID string + }{ + { + name: "no annotations", + namespace: "default", + uid: types.UID("123e4567-e89b-12d3-a456-426614174000"), + annotations: nil, + expectedNamespace: "default", + expectedUID: "123e4567-e89b-12d3-a456-426614174000", + }, + { + name: "empty annotations map", + namespace: "default", + uid: types.UID("123e4567-e89b-12d3-a456-426614174000"), + annotations: map[string]string{}, + expectedNamespace: "default", + expectedUID: "123e4567-e89b-12d3-a456-426614174000", + }, + { + name: "migrated namespace only", + namespace: "default", + uid: types.UID("123e4567-e89b-12d3-a456-426614174000"), + annotations: map[string]string{ + AnnotationMigratedNamespace: "migrated-ns", + }, + expectedNamespace: "migrated-ns", + 
expectedUID: "123e4567-e89b-12d3-a456-426614174000", + }, + { + name: "migrated uid only", + namespace: "default", + uid: types.UID("123e4567-e89b-12d3-a456-426614174000"), + annotations: map[string]string{ + AnnotationMigratedUID: "migrated-uid-value", + }, + expectedNamespace: "default", + expectedUID: "migrated-uid-value", + }, + { + name: "both migrated namespace and uid", + namespace: "default", + uid: types.UID("123e4567-e89b-12d3-a456-426614174000"), + annotations: map[string]string{ + AnnotationMigratedNamespace: "migrated-ns", + AnnotationMigratedUID: "migrated-uid-value", + }, + expectedNamespace: "migrated-ns", + expectedUID: "migrated-uid-value", + }, + { + name: "empty migrated namespace value ignored", + namespace: "default", + uid: types.UID("123e4567-e89b-12d3-a456-426614174000"), + annotations: map[string]string{ + AnnotationMigratedNamespace: "", + }, + expectedNamespace: "default", + expectedUID: "123e4567-e89b-12d3-a456-426614174000", + }, + { + name: "empty migrated uid value ignored", + namespace: "default", + uid: types.UID("123e4567-e89b-12d3-a456-426614174000"), + annotations: map[string]string{ + AnnotationMigratedUID: "", + }, + expectedNamespace: "default", + expectedUID: "123e4567-e89b-12d3-a456-426614174000", + }, + { + name: "both empty values ignored", + namespace: "default", + uid: types.UID("123e4567-e89b-12d3-a456-426614174000"), + annotations: map[string]string{ + AnnotationMigratedNamespace: "", + AnnotationMigratedUID: "", + }, + expectedNamespace: "default", + expectedUID: "123e4567-e89b-12d3-a456-426614174000", + }, + { + name: "other annotations present", + namespace: "default", + uid: types.UID("123e4567-e89b-12d3-a456-426614174000"), + annotations: map[string]string{ + "other.annotation/key": "value", + AnnotationMigratedNamespace: "migrated-ns", + "another.annotation": "another-value", + }, + expectedNamespace: "migrated-ns", + expectedUID: "123e4567-e89b-12d3-a456-426614174000", + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + gotNamespace, gotUID := getNamespaceAndUID(tt.namespace, tt.uid, tt.annotations) + + if gotNamespace != tt.expectedNamespace { + t.Errorf("getNamespaceAndUID() namespace = %v, want %v", gotNamespace, tt.expectedNamespace) + } + if gotUID != tt.expectedUID { + t.Errorf("getNamespaceAndUID() uid = %v, want %v", gotUID, tt.expectedUID) + } + }) + } +} diff --git a/controller/api/v1alpha1/device_types.go b/controller/api/v1alpha1/device_types.go new file mode 100644 index 000000000..4d1534047 --- /dev/null +++ b/controller/api/v1alpha1/device_types.go @@ -0,0 +1,7 @@ +package v1alpha1 + +type Device struct { + Uuid string `json:"uuid,omitempty"` + ParentUuid *string `json:"parent_uuid,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} diff --git a/controller/api/v1alpha1/exporter_helpers.go b/controller/api/v1alpha1/exporter_helpers.go new file mode 100644 index 000000000..2890f80a0 --- /dev/null +++ b/controller/api/v1alpha1/exporter_helpers.go @@ -0,0 +1,47 @@ +package v1alpha1 + +import ( + "strings" + + cpb "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/client/v1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/service/utils" + "k8s.io/apimachinery/pkg/api/meta" + kclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (e *Exporter) InternalSubject() string { + namespace, uid := getNamespaceAndUID(e.Namespace, e.UID, e.Annotations) + return strings.Join([]string{"exporter", namespace, e.Name, uid}, ":") +} + +func (e *Exporter) Usernames(prefix string) []string { + usernames := []string{prefix + e.InternalSubject()} + + if e.Spec.Username != nil { + usernames = append(usernames, *e.Spec.Username) + } + + return usernames +} + +func (e *Exporter) ToProtobuf() *cpb.Exporter { + // get online status from conditions + isOnline := meta.IsStatusConditionTrue(e.Status.Conditions, string(ExporterConditionTypeOnline)) + + return &cpb.Exporter{ + Name: 
utils.UnparseExporterIdentifier(kclient.ObjectKeyFromObject(e)), + Labels: e.Labels, + Online: isOnline, + } +} + +func (l *ExporterList) ToProtobuf() *cpb.ListExportersResponse { + var jexporters []*cpb.Exporter + for _, jexporter := range l.Items { + jexporters = append(jexporters, jexporter.ToProtobuf()) + } + return &cpb.ListExportersResponse{ + Exporters: jexporters, + NextPageToken: l.Continue, + } +} diff --git a/controller/api/v1alpha1/exporter_helpers_test.go b/controller/api/v1alpha1/exporter_helpers_test.go new file mode 100644 index 000000000..9c7e47a49 --- /dev/null +++ b/controller/api/v1alpha1/exporter_helpers_test.go @@ -0,0 +1,89 @@ +package v1alpha1 + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestExporter_InternalSubject(t *testing.T) { + t.Run("without annotations", func(t *testing.T) { + e := &Exporter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-exporter", + Namespace: "default", + UID: types.UID("123e4567-e89b-12d3-a456-426614174000"), + }, + } + expected := "exporter:default:my-exporter:123e4567-e89b-12d3-a456-426614174000" + if got := e.InternalSubject(); got != expected { + t.Errorf("got %v, want %v", got, expected) + } + }) + + t.Run("with both migrated annotations", func(t *testing.T) { + e := &Exporter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-exporter", + Namespace: "default", + UID: types.UID("123e4567-e89b-12d3-a456-426614174000"), + Annotations: map[string]string{ + AnnotationMigratedNamespace: "old-namespace", + AnnotationMigratedUID: "old-uid-value", + }, + }, + } + expected := "exporter:old-namespace:my-exporter:old-uid-value" + if got := e.InternalSubject(); got != expected { + t.Errorf("got %v, want %v", got, expected) + } + }) + + t.Run("empty annotation values are ignored", func(t *testing.T) { + e := &Exporter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-exporter", + Namespace: "default", + UID: types.UID("123e4567-e89b-12d3-a456-426614174000"), 
+ Annotations: map[string]string{ + AnnotationMigratedNamespace: "", + AnnotationMigratedUID: "", + }, + }, + } + expected := "exporter:default:my-exporter:123e4567-e89b-12d3-a456-426614174000" + if got := e.InternalSubject(); got != expected { + t.Errorf("got %v, want %v", got, expected) + } + }) +} + +func TestExporter_Usernames(t *testing.T) { + t.Run("without custom username", func(t *testing.T) { + e := &Exporter{ + ObjectMeta: metav1.ObjectMeta{Name: "my-exporter", Namespace: "default", UID: types.UID("123")}, + Spec: ExporterSpec{}, + } + got := e.Usernames("internal:") + if len(got) != 1 || got[0] != "internal:exporter:default:my-exporter:123" { + t.Errorf("got %v, want single internal subject", got) + } + }) + + t.Run("with custom username", func(t *testing.T) { + e := &Exporter{ + ObjectMeta: metav1.ObjectMeta{Name: "my-exporter", Namespace: "default", UID: types.UID("123")}, + Spec: ExporterSpec{Username: stringPtr("custom-user")}, + } + got := e.Usernames("internal:") + if len(got) != 2 || got[1] != "custom-user" { + t.Errorf("got %v, want internal subject and custom username", got) + } + }) +} + +// Helper function to create string pointers +func stringPtr(s string) *string { + return &s +} diff --git a/controller/api/v1alpha1/exporter_types.go b/controller/api/v1alpha1/exporter_types.go new file mode 100644 index 000000000..9f17dd999 --- /dev/null +++ b/controller/api/v1alpha1/exporter_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// ExporterSpec defines the desired state of Exporter +type ExporterSpec struct { + Username *string `json:"username,omitempty"` +} + +// ExporterStatus defines the observed state of Exporter +type ExporterStatus struct { + // Exporter status fields + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Credential *corev1.LocalObjectReference `json:"credential,omitempty"` + Devices []Device `json:"devices,omitempty"` + LeaseRef *corev1.LocalObjectReference `json:"leaseRef,omitempty"` + LastSeen metav1.Time `json:"lastSeen,omitempty"` + Endpoint string `json:"endpoint,omitempty"` +} + +type ExporterConditionType string + +const ( + ExporterConditionTypeRegistered ExporterConditionType = "Registered" + ExporterConditionTypeOnline ExporterConditionType = "Online" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Exporter is the Schema for the exporters API +type Exporter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExporterSpec `json:"spec,omitempty"` + Status ExporterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExporterList contains a list of Exporter +type ExporterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Exporter `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Exporter{}, &ExporterList{}) +} diff --git a/controller/api/v1alpha1/exporteraccesspolicy_types.go b/controller/api/v1alpha1/exporteraccesspolicy_types.go new file mode 100644 index 
000000000..864a33c7a --- /dev/null +++ b/controller/api/v1alpha1/exporteraccesspolicy_types.go @@ -0,0 +1,71 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +type From struct { + ClientSelector metav1.LabelSelector `json:"clientSelector,omitempty"` +} + +type Policy struct { + Priority int `json:"priority,omitempty"` + From []From `json:"from,omitempty"` + MaximumDuration *metav1.Duration `json:"maximumDuration,omitempty"` + SpotAccess bool `json:"spotAccess,omitempty"` +} + +// ExporterAccessPolicySpec defines the desired state of ExporterAccessPolicy. +type ExporterAccessPolicySpec struct { + ExporterSelector metav1.LabelSelector `json:"exporterSelector,omitempty"` + Policies []Policy `json:"policies,omitempty"` +} + +// ExporterAccessPolicyStatus defines the observed state of ExporterAccessPolicy. +type ExporterAccessPolicyStatus struct { + // Status field for the exporter access policies +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ExporterAccessPolicy is the Schema for the exporteraccesspolicies API. 
+type ExporterAccessPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExporterAccessPolicySpec `json:"spec,omitempty"` + Status ExporterAccessPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExporterAccessPolicyList contains a list of ExporterAccessPolicy. +type ExporterAccessPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExporterAccessPolicy `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExporterAccessPolicy{}, &ExporterAccessPolicyList{}) +} diff --git a/controller/api/v1alpha1/groupversion_info.go b/controller/api/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..dc506bd3c --- /dev/null +++ b/controller/api/v1alpha1/groupversion_info.go @@ -0,0 +1,44 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha1 contains API Schema definitions for the v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=jumpstarter.dev +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "jumpstarter.dev", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +const ( + // AnnotationMigratedNamespace is the annotation key for migrated namespace + AnnotationMigratedNamespace = "jumpstarter.dev/migrated-namespace" + + // AnnotationMigratedUID is the annotation key for migrated UID + AnnotationMigratedUID = "jumpstarter.dev/migrated-uid" +) diff --git a/controller/api/v1alpha1/lease_helpers.go b/controller/api/v1alpha1/lease_helpers.go new file mode 100644 index 000000000..54b42de6a --- /dev/null +++ b/controller/api/v1alpha1/lease_helpers.go @@ -0,0 +1,350 @@ +package v1alpha1 + +import ( + "context" + "fmt" + "time" + + cpb "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/client/v1" + pb "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/v1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/service/utils" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + kclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// 
ReconcileLeaseTimeFields calculates missing time fields and validates consistency +// between BeginTime, EndTime, and Duration. Modifies pointers in place. +// +// Supported lease specification patterns: +// 1. Duration only (no BeginTime/EndTime): immediate start, runs for Duration +// - BeginTime set by controller when exporter acquired +// - EndTime = Status.BeginTime + Duration (calculated at runtime) +// +// 2. EndTime only: INVALID - cannot infer Duration without BeginTime or explicit Duration +// - Returns error: "duration is required (must specify Duration, or both BeginTime and EndTime)" +// +// 3. BeginTime + Duration: scheduled start at BeginTime, runs for Duration +// - Lease waits until BeginTime, then acquires exporter +// - EndTime = BeginTime + Duration (calculated at runtime) +// +// 4. BeginTime + EndTime: scheduled window, Duration computed from times +// - Duration = EndTime - BeginTime (auto-calculated here) +// - Validates EndTime > BeginTime (positive duration) +// +// 5. EndTime + Duration: scheduled end, BeginTime computed as EndTime - Duration +// - BeginTime = EndTime - Duration (auto-calculated here) +// - Useful for "finish by" scheduling +// +// 6. BeginTime + EndTime + Duration: all three specified, validates consistency +// - Validates Duration == EndTime - BeginTime +// - Returns error if inconsistent: "duration conflicts with begin_time and end_time" +// +// Note: The controller never auto-populates Spec.EndTime. It calculates expiration time +// on-demand from available fields, keeping Spec.EndTime meaningful only when explicitly +// set by the user. See lease_controller.go reconcileStatusEnded for expiration logic. 
+func ReconcileLeaseTimeFields(beginTime, endTime **metav1.Time, duration **metav1.Duration) error { + if *beginTime != nil && *endTime != nil { + // Calculate duration from explicit begin/end times + calculated := (*endTime).Sub((*beginTime).Time) + if *duration != nil && (*duration).Duration > 0 && (*duration).Duration != calculated { + return fmt.Errorf("duration conflicts with begin_time and end_time") + } + *duration = &metav1.Duration{Duration: calculated} + } else if *endTime != nil && *duration != nil && (*duration).Duration > 0 { + // Calculate BeginTime from EndTime - Duration (scheduled lease ending at specific time) + *beginTime = &metav1.Time{Time: (*endTime).Add(-(*duration).Duration)} + } + + // Validate final duration is positive (rejects nil, negative, zero) + if *duration == nil { + return fmt.Errorf("duration is required (must specify Duration, or both BeginTime and EndTime)") + } + if (*duration).Duration <= 0 { + return fmt.Errorf("duration must be positive, got %v", (*duration).Duration) + } + return nil +} + +// ParseLabelSelector parses a label selector string and converts it to metav1.LabelSelector. +// This function supports the != operator by first parsing with labels.Parse() which supports +// all label selector syntax including !=, then converting to metav1.LabelSelector format. 
+func ParseLabelSelector(selectorStr string) (*metav1.LabelSelector, error) { + // First, try to parse using labels.Parse() which supports != operator + parsedSelector, err := labels.Parse(selectorStr) + if err != nil { + return nil, fmt.Errorf("failed to parse label selector: %w", err) + } + + // Extract requirements from the parsed selector + requirements, selectable := parsedSelector.Requirements() + if !selectable { + return &metav1.LabelSelector{}, nil + } + + // Convert requirements to metav1.LabelSelector format + matchLabels := make(map[string]string) + var matchExpressions []metav1.LabelSelectorRequirement + + // Track NotEquals requirements by key so we can combine them into NotIn + notEqualsByKey := make(map[string][]string) + + for _, req := range requirements { + key := req.Key() + operator := req.Operator() + values := req.ValuesUnsorted() + + switch operator { + case selection.Equals: + // For exact match with single value, use matchLabels + if len(values) == 1 { + // Check if we already have an equality requirement for this key with a different value + if existingValue, exists := matchLabels[key]; exists && existingValue != values[0] { + return nil, fmt.Errorf("invalid selector: label %s cannot have multiple equality requirements with different values (%s and %s)", key, existingValue, values[0]) + } + matchLabels[key] = values[0] + } else { + // Multiple values should use In operator + matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{ + Key: key, + Operator: metav1.LabelSelectorOpIn, + Values: values, + }) + } + case selection.NotEquals: + // Accumulate != requirements for the same key to combine into NotIn + if len(values) != 1 { + return nil, fmt.Errorf("invalid selector: != operator requires exactly one value") + } + notEqualsByKey[key] = append(notEqualsByKey[key], values[0]) + case selection.In: + matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{ + Key: key, + Operator: 
metav1.LabelSelectorOpIn, + Values: values, + }) + case selection.NotIn: + matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{ + Key: key, + Operator: metav1.LabelSelectorOpNotIn, + Values: values, + }) + case selection.Exists: + matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{ + Key: key, + Operator: metav1.LabelSelectorOpExists, + Values: []string{}, + }) + case selection.DoesNotExist: + matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{ + Key: key, + Operator: metav1.LabelSelectorOpDoesNotExist, + Values: []string{}, + }) + default: + return nil, fmt.Errorf("unsupported label selector operator: %v", operator) + } + } + + // Convert accumulated NotEquals requirements to NotIn expressions + for key, vals := range notEqualsByKey { + matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{ + Key: key, + Operator: metav1.LabelSelectorOpNotIn, + Values: vals, + }) + } + + return &metav1.LabelSelector{ + MatchLabels: matchLabels, + MatchExpressions: matchExpressions, + }, nil +} + +func LeaseFromProtobuf( + req *cpb.Lease, + key types.NamespacedName, + clientRef corev1.LocalObjectReference, +) (*Lease, error) { + selector, err := ParseLabelSelector(req.Selector) + if err != nil { + return nil, err + } + + var beginTime, endTime *metav1.Time + var duration *metav1.Duration + + if req.BeginTime != nil { + beginTime = &metav1.Time{Time: req.BeginTime.AsTime()} + } + if req.EndTime != nil { + endTime = &metav1.Time{Time: req.EndTime.AsTime()} + } + if req.Duration != nil { + duration = &metav1.Duration{Duration: req.Duration.AsDuration()} + } + if err := ReconcileLeaseTimeFields(&beginTime, &endTime, &duration); err != nil { + return nil, err + } + + return &Lease{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: key.Namespace, + Name: key.Name, + }, + Spec: LeaseSpec{ + ClientRef: clientRef, + Duration: duration, + Selector: *selector, + BeginTime: beginTime, + EndTime: 
endTime, + }, + }, nil +} + +func (l *Lease) ToProtobuf() *cpb.Lease { + var conditions []*pb.Condition + for _, condition := range l.Status.Conditions { + conditions = append(conditions, &pb.Condition{ + Type: &condition.Type, + Status: (*string)(&condition.Status), + ObservedGeneration: &condition.ObservedGeneration, + LastTransitionTime: &pb.Time{ + Seconds: &condition.LastTransitionTime.ProtoTime().Seconds, + Nanos: &condition.LastTransitionTime.ProtoTime().Nanos, + }, + Reason: &condition.Reason, + Message: &condition.Message, + }) + } + + lease := cpb.Lease{ + Name: fmt.Sprintf("namespaces/%s/leases/%s", l.Namespace, l.Name), + Selector: metav1.FormatLabelSelector(&l.Spec.Selector), + Client: ptr.To(fmt.Sprintf("namespaces/%s/clients/%s", l.Namespace, l.Spec.ClientRef.Name)), + Conditions: conditions, + } + if l.Spec.Duration != nil { + lease.Duration = durationpb.New(l.Spec.Duration.Duration) + } + + // Requested/planned times from Spec + if l.Spec.BeginTime != nil { + lease.BeginTime = timestamppb.New(l.Spec.BeginTime.Time) + } + if l.Spec.EndTime != nil { + lease.EndTime = timestamppb.New(l.Spec.EndTime.Time) + } + + // Actual times from Status + if l.Status.BeginTime != nil { + lease.EffectiveBeginTime = timestamppb.New(l.Status.BeginTime.Time) + endTime := time.Now() + if l.Status.EndTime != nil { + endTime = l.Status.EndTime.Time + lease.EffectiveEndTime = timestamppb.New(endTime) + } + // Final effective duration or current one so far while active. Non-negative to handle clock skew. 
+ effectiveDuration := max(endTime.Sub(l.Status.BeginTime.Time), 0) + lease.EffectiveDuration = durationpb.New(effectiveDuration) + } + if l.Status.ExporterRef != nil { + lease.Exporter = ptr.To(utils.UnparseExporterIdentifier(kclient.ObjectKey{ + Namespace: l.Namespace, + Name: l.Status.ExporterRef.Name, + })) + } + + return &lease +} + +func (l *LeaseList) ToProtobuf() *cpb.ListLeasesResponse { + var jleases []*cpb.Lease + for _, jlease := range l.Items { + jleases = append(jleases, jlease.ToProtobuf()) + } + return &cpb.ListLeasesResponse{ + Leases: jleases, + NextPageToken: l.Continue, + } +} + +func (l *Lease) GetExporterSelector() (labels.Selector, error) { + return metav1.LabelSelectorAsSelector(&l.Spec.Selector) +} + +func (l *Lease) SetStatusPending(reason, messageFormat string, a ...any) { + l.SetStatusCondition(LeaseConditionTypePending, true, reason, messageFormat, a...) +} + +func (l *Lease) SetStatusReady(status bool, reason, messageFormat string, a ...any) { + l.SetStatusCondition(LeaseConditionTypeReady, status, reason, messageFormat, a...) +} + +func (l *Lease) SetStatusUnsatisfiable(reason, messageFormat string, a ...any) { + l.SetStatusCondition(LeaseConditionTypeUnsatisfiable, true, reason, messageFormat, a...) +} + +func (l *Lease) SetStatusInvalid(reason, messageFormat string, a ...any) { + l.SetStatusCondition(LeaseConditionTypeInvalid, true, reason, messageFormat, a...) 
+} + +func (l *Lease) SetStatusCondition( + condition LeaseConditionType, + status bool, + reason, messageFormat string, a ...any) { + + var statusCondition metav1.ConditionStatus + + if status { + statusCondition = metav1.ConditionTrue + } else { + statusCondition = metav1.ConditionFalse + } + + meta.SetStatusCondition(&l.Status.Conditions, metav1.Condition{ + Type: string(condition), + Status: statusCondition, + ObservedGeneration: l.Generation, + LastTransitionTime: metav1.Time{ + Time: time.Now(), + }, + Reason: reason, + Message: fmt.Sprintf(messageFormat, a...), + }) +} + +func (l *Lease) GetExporterName() string { + if l.Status.ExporterRef == nil { + return "(none)" + } + return l.Status.ExporterRef.Name +} + +func (l *Lease) GetClientName() string { + return l.Spec.ClientRef.Name +} + +func (l *Lease) Release(ctx context.Context) { + logger := log.FromContext(ctx) + logger.Info("The lease has been marked for release", "lease", l.Name, "exporter", l.GetExporterName(), "client", l.GetClientName()) + l.SetStatusReady(false, "Released", "The lease was marked for release") + l.Status.Ended = true + l.Status.EndTime = &metav1.Time{Time: time.Now()} +} + +func (l *Lease) Expire(ctx context.Context) { + logger := log.FromContext(ctx) + logger.Info("The lease has expired", "lease", l.Name, "exporter", l.GetExporterName(), "client", l.GetClientName()) + l.SetStatusReady(false, "Expired", "The lease has expired") + l.Status.Ended = true + l.Status.EndTime = &metav1.Time{Time: time.Now()} +} diff --git a/controller/api/v1alpha1/lease_helpers_test.go b/controller/api/v1alpha1/lease_helpers_test.go new file mode 100644 index 000000000..bf747bfb0 --- /dev/null +++ b/controller/api/v1alpha1/lease_helpers_test.go @@ -0,0 +1,277 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +func TestLeaseHelpers(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Lease Helpers Suite") +} + +var _ = Describe("ParseLabelSelector", func() { + Context("when parsing simple selectors", func() { + It("should parse a single key=value selector", func() { + selector, err := ParseLabelSelector("app=myapp") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(HaveKeyWithValue("app", "myapp")) + Expect(selector.MatchExpressions).To(BeEmpty()) + }) + + It("should parse multiple key=value selectors", func() { + selector, err := ParseLabelSelector("app=myapp,env=prod") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(HaveKeyWithValue("app", "myapp")) + Expect(selector.MatchLabels).To(HaveKeyWithValue("env", "prod")) + Expect(selector.MatchExpressions).To(BeEmpty()) + }) + + It("should handle selectors with spaces", func() { + selector, err := ParseLabelSelector("app = myapp , env = prod") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(HaveKeyWithValue("app", "myapp")) + Expect(selector.MatchLabels).To(HaveKeyWithValue("env", "prod")) + }) + }) + + Context("when parsing != operator (the bug fix)", func() { + It("should parse != operator correctly", func() { + selector, err := ParseLabelSelector("revision!=v3") + 
Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchExpressions).To(HaveLen(1)) + Expect(selector.MatchExpressions[0].Key).To(Equal("revision")) + Expect(selector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpNotIn)) + Expect(selector.MatchExpressions[0].Values).To(Equal([]string{"v3"})) + }) + + It("should parse != operator with other selectors", func() { + selector, err := ParseLabelSelector("board-type=qc8775,revision!=v3") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(HaveKeyWithValue("board-type", "qc8775")) + Expect(selector.MatchExpressions).To(HaveLen(1)) + Expect(selector.MatchExpressions[0].Key).To(Equal("revision")) + Expect(selector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpNotIn)) + Expect(selector.MatchExpressions[0].Values).To(Equal([]string{"v3"})) + }) + + It("should parse multiple != operators", func() { + selector, err := ParseLabelSelector("revision!=v3,board-type!=qc8774") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchExpressions).To(HaveLen(2)) + + // Find expressions by key + var revExpr, boardExpr *metav1.LabelSelectorRequirement + for i := range selector.MatchExpressions { + if selector.MatchExpressions[i].Key == "revision" { + revExpr = &selector.MatchExpressions[i] + } + if selector.MatchExpressions[i].Key == "board-type" { + boardExpr = &selector.MatchExpressions[i] + } + } + + Expect(revExpr).NotTo(BeNil()) + Expect(revExpr.Operator).To(Equal(metav1.LabelSelectorOpNotIn)) + Expect(revExpr.Values).To(Equal([]string{"v3"})) + + Expect(boardExpr).NotTo(BeNil()) + Expect(boardExpr.Operator).To(Equal(metav1.LabelSelectorOpNotIn)) + Expect(boardExpr.Values).To(Equal([]string{"qc8774"})) + }) + }) + + Context("when parsing In and NotIn operators", func() { + It("should parse In operator", func() { + selector, err := ParseLabelSelector("env in (prod,staging)") + 
Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchExpressions).To(HaveLen(1)) + Expect(selector.MatchExpressions[0].Key).To(Equal("env")) + Expect(selector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpIn)) + Expect(selector.MatchExpressions[0].Values).To(ContainElements("prod", "staging")) + }) + + It("should parse NotIn operator", func() { + selector, err := ParseLabelSelector("env notin (dev,test)") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchExpressions).To(HaveLen(1)) + Expect(selector.MatchExpressions[0].Key).To(Equal("env")) + Expect(selector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpNotIn)) + Expect(selector.MatchExpressions[0].Values).To(ContainElements("dev", "test")) + }) + }) + + Context("when parsing Exists and DoesNotExist operators", func() { + It("should parse Exists operator", func() { + selector, err := ParseLabelSelector("app") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchExpressions).To(HaveLen(1)) + Expect(selector.MatchExpressions[0].Key).To(Equal("app")) + Expect(selector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpExists)) + Expect(selector.MatchExpressions[0].Values).To(BeEmpty()) + }) + + It("should parse DoesNotExist operator", func() { + selector, err := ParseLabelSelector("!app") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchExpressions).To(HaveLen(1)) + Expect(selector.MatchExpressions[0].Key).To(Equal("app")) + Expect(selector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpDoesNotExist)) + Expect(selector.MatchExpressions[0].Values).To(BeEmpty()) + }) + }) + + Context("when parsing complex selectors", func() { + It("should parse a mix of matchLabels and matchExpressions", func() { + selector, err := ParseLabelSelector("app=myapp,env!=prod") + Expect(err).NotTo(HaveOccurred()) + 
Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(HaveKeyWithValue("app", "myapp")) + Expect(selector.MatchExpressions).To(HaveLen(1)) + Expect(selector.MatchExpressions[0].Key).To(Equal("env")) + Expect(selector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpNotIn)) + }) + + It("should parse selector with all operator types", func() { + selector, err := ParseLabelSelector("app=myapp,revision!=v3,env in (prod,staging),!debug") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(HaveKeyWithValue("app", "myapp")) + Expect(selector.MatchExpressions).To(HaveLen(3)) + }) + }) + + Context("when parsing edge cases", func() { + It("should handle empty selector", func() { + selector, err := ParseLabelSelector("") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(BeEmpty()) + Expect(selector.MatchExpressions).To(BeEmpty()) + }) + + It("should handle selector with special characters in values", func() { + selector, err := ParseLabelSelector("version=v1.2.3,label=my-label") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(HaveKeyWithValue("version", "v1.2.3")) + Expect(selector.MatchLabels).To(HaveKeyWithValue("label", "my-label")) + }) + + It("should handle selector with underscores in keys", func() { + selector, err := ParseLabelSelector("board_type=qc8775,device_id=123") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(HaveKeyWithValue("board_type", "qc8775")) + Expect(selector.MatchLabels).To(HaveKeyWithValue("device_id", "123")) + }) + }) + + Context("when parsing invalid selectors", func() { + It("should return error for invalid syntax", func() { + selector, err := ParseLabelSelector("invalid===syntax") + Expect(err).To(HaveOccurred()) + Expect(selector).To(BeNil()) + }) + + It("should reject repeated equality requirements on the 
same key with different values", func() { + selector, err := ParseLabelSelector("a=1,a=2") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cannot have multiple equality requirements")) + Expect(err.Error()).To(ContainSubstring("a")) + Expect(selector).To(BeNil()) + }) + + It("should accept repeated equality requirements on the same key with the same value", func() { + selector, err := ParseLabelSelector("a=1,a=1") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchLabels).To(HaveKeyWithValue("a", "1")) + }) + + It("should combine multiple != operators for the same key into NotIn", func() { + selector, err := ParseLabelSelector("key!=value1,key!=value2") + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + Expect(selector.MatchExpressions).To(HaveLen(1)) + Expect(selector.MatchExpressions[0].Key).To(Equal("key")) + Expect(selector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpNotIn)) + Expect(selector.MatchExpressions[0].Values).To(ConsistOf("value1", "value2")) + }) + }) + + Context("round-trip compatibility", func() { + It("should produce a selector that can be converted back to labels.Selector", func() { + originalStr := "board-type=qc8775,revision!=v3" + selector, err := ParseLabelSelector(originalStr) + Expect(err).NotTo(HaveOccurred()) + Expect(selector).NotTo(BeNil()) + + // Convert back to labels.Selector using the standard Kubernetes function + parsedSelector, err := metav1.LabelSelectorAsSelector(selector) + Expect(err).NotTo(HaveOccurred()) + Expect(parsedSelector).NotTo(BeNil()) + + // Verify it matches the expected labels + testLabels := labels.Set{ + "board-type": "qc8775", + "revision": "v3", + } + // Should NOT match because revision!=v3 + Expect(parsedSelector.Matches(testLabels)).To(BeFalse()) + + testLabels2 := labels.Set{ + "board-type": "qc8775", + "revision": "v2", + } + // Should match because revision is v2, not v3 + 
Expect(parsedSelector.Matches(testLabels2)).To(BeTrue()) + }) + + It("should match labels correctly for != operator", func() { + selector, err := ParseLabelSelector("revision!=v3") + Expect(err).NotTo(HaveOccurred()) + + parsedSelector, err := metav1.LabelSelectorAsSelector(selector) + Expect(err).NotTo(HaveOccurred()) + + // Should match labels without revision=v3 + Expect(parsedSelector.Matches(labels.Set{"revision": "v2"})).To(BeTrue()) + Expect(parsedSelector.Matches(labels.Set{"revision": "v4"})).To(BeTrue()) + + // Should not match labels with revision=v3 + Expect(parsedSelector.Matches(labels.Set{"revision": "v3"})).To(BeFalse()) + Expect(parsedSelector.Matches(labels.Set{"revision": "v3", "other": "value"})).To(BeFalse()) + }) + }) +}) diff --git a/controller/api/v1alpha1/lease_types.go b/controller/api/v1alpha1/lease_types.go new file mode 100644 index 000000000..97d4f5e6d --- /dev/null +++ b/controller/api/v1alpha1/lease_types.go @@ -0,0 +1,99 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LeaseSpec defines the desired state of Lease +type LeaseSpec struct { + // The client that is requesting the lease + ClientRef corev1.LocalObjectReference `json:"clientRef"` + // Duration of the lease. Must be positive when provided. 
+ // Can be omitted (nil) when both BeginTime and EndTime are provided, + // in which case it's calculated as EndTime - BeginTime. + Duration *metav1.Duration `json:"duration,omitempty"` + // The selector for the exporter to be used + Selector metav1.LabelSelector `json:"selector"` + // The release flag requests the controller to end the lease now + Release bool `json:"release,omitempty"` + // Requested start time. If omitted, lease starts when exporter is acquired. + // Immutable after lease starts (cannot change the past). + BeginTime *metav1.Time `json:"beginTime,omitempty"` + // Requested end time. If specified with BeginTime, Duration is calculated. + // Can be updated to extend or shorten active leases. + EndTime *metav1.Time `json:"endTime,omitempty"` +} + +// LeaseStatus defines the observed state of Lease +type LeaseStatus struct { + // If the lease has been acquired an exporter name is assigned + // and then it can be used, it will be empty while still pending + BeginTime *metav1.Time `json:"beginTime,omitempty"` + EndTime *metav1.Time `json:"endTime,omitempty"` + ExporterRef *corev1.LocalObjectReference `json:"exporterRef,omitempty"` + Ended bool `json:"ended"` + Priority int `json:"priority,omitempty"` + SpotAccess bool `json:"spotAccess,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +type LeaseConditionType string + +const ( + LeaseConditionTypePending LeaseConditionType = "Pending" + LeaseConditionTypeReady LeaseConditionType = "Ready" + LeaseConditionTypeUnsatisfiable LeaseConditionType = "Unsatisfiable" + LeaseConditionTypeInvalid LeaseConditionType = "Invalid" +) + +type LeaseLabel string + +const ( + LeaseLabelEnded LeaseLabel = "jumpstarter.dev/lease-ended" + LeaseLabelEndedValue string = "true" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".status.ended",name=Ended,type=boolean +// 
+kubebuilder:printcolumn:JSONPath=".spec.clientRef.name",name=Client,type=string +// +kubebuilder:printcolumn:JSONPath=".status.exporterRef.name",name=Exporter,type=string + +// Lease is the Schema for the exporters API +type Lease struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LeaseSpec `json:"spec,omitempty"` + Status LeaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LeaseList contains a list of Lease +type LeaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Lease `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Lease{}, &LeaseList{}) +} diff --git a/controller/api/v1alpha1/zz_generated.deepcopy.go b/controller/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..8af9619f6 --- /dev/null +++ b/controller/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,584 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Internal = in.Internal + if in.JWT != nil { + in, out := &in.JWT, &out.JWT + *out = make([]v1beta1.JWTAuthenticator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration. +func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration { + if in == nil { + return nil + } + out := new(AuthenticationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Client) DeepCopyInto(out *Client) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Client. +func (in *Client) DeepCopy() *Client { + if in == nil { + return nil + } + out := new(Client) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Client) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientList) DeepCopyInto(out *ClientList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Client, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientList. +func (in *ClientList) DeepCopy() *ClientList { + if in == nil { + return nil + } + out := new(ClientList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClientList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientSpec) DeepCopyInto(out *ClientSpec) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientSpec. +func (in *ClientSpec) DeepCopy() *ClientSpec { + if in == nil { + return nil + } + out := new(ClientSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientStatus) DeepCopyInto(out *ClientStatus) { + *out = *in + if in.Credential != nil { + in, out := &in.Credential, &out.Credential + *out = new(v1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientStatus. 
+func (in *ClientStatus) DeepCopy() *ClientStatus { + if in == nil { + return nil + } + out := new(ClientStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Device) DeepCopyInto(out *Device) { + *out = *in + if in.ParentUuid != nil { + in, out := &in.ParentUuid, &out.ParentUuid + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. +func (in *Device) DeepCopy() *Device { + if in == nil { + return nil + } + out := new(Device) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Exporter) DeepCopyInto(out *Exporter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Exporter. +func (in *Exporter) DeepCopy() *Exporter { + if in == nil { + return nil + } + out := new(Exporter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Exporter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExporterAccessPolicy) DeepCopyInto(out *ExporterAccessPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterAccessPolicy. +func (in *ExporterAccessPolicy) DeepCopy() *ExporterAccessPolicy { + if in == nil { + return nil + } + out := new(ExporterAccessPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExporterAccessPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExporterAccessPolicyList) DeepCopyInto(out *ExporterAccessPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExporterAccessPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterAccessPolicyList. +func (in *ExporterAccessPolicyList) DeepCopy() *ExporterAccessPolicyList { + if in == nil { + return nil + } + out := new(ExporterAccessPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExporterAccessPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExporterAccessPolicySpec) DeepCopyInto(out *ExporterAccessPolicySpec) { + *out = *in + in.ExporterSelector.DeepCopyInto(&out.ExporterSelector) + if in.Policies != nil { + in, out := &in.Policies, &out.Policies + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterAccessPolicySpec. +func (in *ExporterAccessPolicySpec) DeepCopy() *ExporterAccessPolicySpec { + if in == nil { + return nil + } + out := new(ExporterAccessPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExporterAccessPolicyStatus) DeepCopyInto(out *ExporterAccessPolicyStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterAccessPolicyStatus. +func (in *ExporterAccessPolicyStatus) DeepCopy() *ExporterAccessPolicyStatus { + if in == nil { + return nil + } + out := new(ExporterAccessPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExporterList) DeepCopyInto(out *ExporterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Exporter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterList. +func (in *ExporterList) DeepCopy() *ExporterList { + if in == nil { + return nil + } + out := new(ExporterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ExporterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExporterSpec) DeepCopyInto(out *ExporterSpec) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterSpec. +func (in *ExporterSpec) DeepCopy() *ExporterSpec { + if in == nil { + return nil + } + out := new(ExporterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExporterStatus) DeepCopyInto(out *ExporterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Credential != nil { + in, out := &in.Credential, &out.Credential + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]Device, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LeaseRef != nil { + in, out := &in.LeaseRef, &out.LeaseRef + *out = new(v1.LocalObjectReference) + **out = **in + } + in.LastSeen.DeepCopyInto(&out.LastSeen) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterStatus. +func (in *ExporterStatus) DeepCopy() *ExporterStatus { + if in == nil { + return nil + } + out := new(ExporterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *From) DeepCopyInto(out *From) { + *out = *in + in.ClientSelector.DeepCopyInto(&out.ClientSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new From. +func (in *From) DeepCopy() *From { + if in == nil { + return nil + } + out := new(From) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Internal) DeepCopyInto(out *Internal) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Internal. +func (in *Internal) DeepCopy() *Internal { + if in == nil { + return nil + } + out := new(Internal) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lease) DeepCopyInto(out *Lease) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease. +func (in *Lease) DeepCopy() *Lease { + if in == nil { + return nil + } + out := new(Lease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Lease) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LeaseList) DeepCopyInto(out *LeaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Lease, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList. +func (in *LeaseList) DeepCopy() *LeaseList { + if in == nil { + return nil + } + out := new(LeaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { + *out = *in + out.ClientRef = in.ClientRef + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(metav1.Duration) + **out = **in + } + in.Selector.DeepCopyInto(&out.Selector) + if in.BeginTime != nil { + in, out := &in.BeginTime, &out.BeginTime + *out = (*in).DeepCopy() + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec. +func (in *LeaseSpec) DeepCopy() *LeaseSpec { + if in == nil { + return nil + } + out := new(LeaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LeaseStatus) DeepCopyInto(out *LeaseStatus) { + *out = *in + if in.BeginTime != nil { + in, out := &in.BeginTime, &out.BeginTime + *out = (*in).DeepCopy() + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = (*in).DeepCopy() + } + if in.ExporterRef != nil { + in, out := &in.ExporterRef, &out.ExporterRef + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseStatus. +func (in *LeaseStatus) DeepCopy() *LeaseStatus { + if in == nil { + return nil + } + out := new(LeaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]From, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaximumDuration != nil { + in, out := &in.MaximumDuration, &out.MaximumDuration + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. 
+func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} diff --git a/controller/buf.gen.yaml b/controller/buf.gen.yaml new file mode 100644 index 000000000..279feb359 --- /dev/null +++ b/controller/buf.gen.yaml @@ -0,0 +1,23 @@ +version: v2 +managed: + enabled: true + disable: + - module: buf.build/googleapis/googleapis + - module: buf.build/grpc-ecosystem/grpc-gateway + override: + - file_option: go_package_prefix + value: github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol +plugins: + - remote: buf.build/protocolbuffers/go + out: internal/protocol + opt: paths=source_relative + - remote: buf.build/grpc/go + out: internal/protocol + opt: paths=source_relative + - remote: buf.build/grpc-ecosystem/gateway + out: internal/protocol + opt: paths=source_relative +inputs: + - git_repo: https://github.com/jumpstarter-dev/jumpstarter-protocol.git + branch: main + subdir: proto diff --git a/controller/cmd/main.go b/controller/cmd/main.go new file mode 100644 index 000000000..37f3fd27a --- /dev/null +++ b/controller/cmd/main.go @@ -0,0 +1,315 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "crypto/tls" + "encoding/pem" + "flag" + "net" + "os" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ apiserverinstall "k8s.io/apiserver/pkg/apis/apiserver/install" + _ "k8s.io/client-go/plugin/pkg/client/auth" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/authentication" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/authorization" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/config" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/controller" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/service" + + // +kubebuilder:scaffold:imports + + _ "google.golang.org/grpc/encoding" +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + + // Version information - set via ldflags at build time + version = "dev" + gitCommit = "unknown" + buildDate = "unknown" +) + +const ( + // namespaceFile is the path to the namespace file mounted by Kubernetes + namespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" +) + +// getWatchNamespace returns the namespace the controller should watch. +// It tries multiple sources in order: +// 1. NAMESPACE environment variable (explicit configuration takes precedence) +// 2. Namespace file (automatically mounted by Kubernetes in every pod) +// 3. 
Empty string (will fail, not supported since 0.8.0) +func getWatchNamespace() string { + // First check NAMESPACE environment variable (explicit configuration) + if ns := os.Getenv("NAMESPACE"); ns != "" { + setupLog.Info("Using namespace from NAMESPACE environment variable", "namespace", ns) + return ns + } + + // Fall back to reading from the namespace file mounted by Kubernetes + if ns, err := os.ReadFile(namespaceFile); err == nil { + namespace := string(ns) + if namespace != "" { + setupLog.Info("Auto-detected namespace from service account", "namespace", namespace) + return namespace + } + } + + return "" +} + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(jumpstarterdevv1alpha1.AddToScheme(scheme)) + + // +kubebuilder:scaffold:scheme + apiserverinstall.Install(scheme) +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + var secureMetrics bool + var enableHTTP2 bool + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metric endpoint binds to. "+ + "Use the port :8080. If not set, it will be 0 in order to disable the metrics server") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. 
"+ + "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&secureMetrics, "metrics-secure", false, + "If set the metrics endpoint is served securely") + flag.BoolVar(&enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + // Print version information + setupLog.Info("Jumpstarter Controller starting", + "version", version, + "gitCommit", gitCommit, + "buildDate", buildDate, + ) + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + tlsOpts := []func(*tls.Config){} + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + + webhookServer := webhook.NewServer(webhook.Options{ + TLSOpts: tlsOpts, + }) + + // Get the namespace to watch. Try to auto-detect from the pod's service account, + // fall back to NAMESPACE environment variable, or watch all namespaces if neither is available + watchNamespace := getWatchNamespace() + + mgrOptions := ctrl.Options{ + Scheme: scheme, + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: secureMetrics, + TLSOpts: tlsOpts, + }, + WebhookServer: webhookServer, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "a38b78e7.jumpstarter.dev", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. 
This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. + // LeaderElectionReleaseOnCancel: true, + } + + // If a specific namespace is set, configure the cache to only watch that namespace + if watchNamespace != "" { + mgrOptions.LeaderElectionNamespace = watchNamespace + mgrOptions.Cache = cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + watchNamespace: {}, + }, + } + } else { + setupLog.Error(nil, "Jumpstarter controller can only be configured to work on a single namespace since 0.8.0") + os.Exit(1) + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), mgrOptions) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + oidcCert, err := service.NewSelfSignedCertificate("jumpstarter oidc", []string{"localhost"}, []net.IP{}) + if err != nil { + setupLog.Error(err, "unable to generate certificate for internal oidc provider") + os.Exit(1) + } + + oidcSigner, err := oidc.NewSignerFromSeed( + []byte(os.Getenv("CONTROLLER_KEY")), + "https://localhost:8085", + "jumpstarter", + ) + if err != nil { + setupLog.Error(err, "unable to create internal oidc signer") + os.Exit(1) + } + + authenticator, prefix, router, option, provisioning, err := config.LoadConfiguration( + context.Background(), + mgr.GetAPIReader(), + mgr.GetScheme(), + client.ObjectKey{ + Namespace: os.Getenv("NAMESPACE"), + Name: "jumpstarter-controller", + }, + oidcSigner, + string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: oidcCert.Certificate[0], + })), + 
) + if err != nil { + setupLog.Error(err, "unable to load configuration") + os.Exit(1) + } + + if err = (&controller.ExporterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Signer: oidcSigner, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Exporter") + os.Exit(1) + } + if err = (&controller.ClientReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Signer: oidcSigner, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Identity") + os.Exit(1) + } + if err = (&controller.LeaseReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Lease") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + + watchClient, err := client.NewWithWatch(mgr.GetConfig(), client.Options{Scheme: mgr.GetScheme()}) + if err != nil { + setupLog.Error(err, "unable to create client with watch", "service", "Controller") + os.Exit(1) + } + + if err = (&service.ControllerService{ + Client: watchClient, + Scheme: mgr.GetScheme(), + Authn: authentication.NewBearerTokenAuthenticator(authenticator), + Authz: authorization.NewBasicAuthorizer(watchClient, prefix, provisioning.Enabled), + Attr: authorization.NewMetadataAttributesGetter(authorization.MetadataAttributesGetterConfig{ + NamespaceKey: "jumpstarter-namespace", + ResourceKey: "jumpstarter-kind", + NameKey: "jumpstarter-name", + }), + Router: router, + ServerOption: option, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create service", "service", "Controller") + os.Exit(1) + } + + if err = (&service.OIDCService{ + Signer: oidcSigner, + Cert: oidcCert, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create service", "service", "OIDC") + os.Exit(1) + } + + if err = (&service.DashboardService{ + Client: mgr.GetClient(), + 
Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create service", "service", "Dashboard") + os.Exit(1) + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/controller/cmd/router/main.go b/controller/cmd/router/main.go new file mode 100644 index 000000000..04faaf10c --- /dev/null +++ b/controller/cmd/router/main.go @@ -0,0 +1,91 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "flag" + "os" + "os/signal" + "syscall" + + ctrl "sigs.k8s.io/controller-runtime" + kclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/go-logr/logr" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/config" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/service" + + _ "google.golang.org/grpc/encoding/gzip" +) + +var ( + // Version information - set via ldflags at build time + version = "dev" + gitCommit = "unknown" + buildDate = "unknown" +) + +func main() { + opts := zap.Options{} + opts.BindFlags(flag.CommandLine) + + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + logger := ctrl.Log.WithName("router") + ctx := logr.NewContext(context.Background(), logger) + + // Print version information + logger.Info("Jumpstarter Router starting", + "version", version, + "gitCommit", gitCommit, + "buildDate", buildDate, + ) + + cfg := ctrl.GetConfigOrDie() + client, err := kclient.New(cfg, kclient.Options{}) + if err != nil { + logger.Error(err, "failed to create k8s client") + os.Exit(1) + } + + serverOption, err := config.LoadRouterConfiguration(ctx, client, kclient.ObjectKey{ + Namespace: os.Getenv("NAMESPACE"), + Name: "jumpstarter-controller", + }) + if err != nil { + logger.Error(err, "failed to load router configuration") + os.Exit(1) + } + + svc := service.RouterService{ + ServerOption: serverOption, + } + + err = svc.Start(ctx) + if err != nil { + logger.Error(err, "failed to start router service") + os.Exit(1) + } + + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + sig := <-sigs + logger.Info("received signal, exiting", "signal", sig) +} diff --git a/controller/config/samples/dex.yaml b/controller/config/samples/dex.yaml new file mode 100644 index 000000000..f0df869f6 --- /dev/null +++ b/controller/config/samples/dex.yaml @@ -0,0 +1,28 @@ +issuer: 
https://10.239.206.8:5556/dex +storage: + type: sqlite3 + config: + file: dex.db +web: + https: 0.0.0.0:5556 + tlsCert: 10.239.206.8/cert.pem + tlsKey: 10.239.206.8/key.pem +logger: + level: "debug" + format: "text" +staticClients: + - id: jumpstarter + name: jumpstarter + secret: secret +oauth2: + passwordConnector: local +enablePasswordDB: true +staticPasswords: + - email: "client-sample@example.com" + hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W" # password + username: "client-sample" + userID: "73bca0b9-9be6-4e73-a8fb-347c2ac23255" + - email: "exporter-sample@example.com" + hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W" # password + username: "exporter-sample" + userID: "a4cb4de2-4467-4e5c-a42a-33be8783649d" diff --git a/controller/config/samples/kustomization.yaml b/controller/config/samples/kustomization.yaml new file mode 100644 index 000000000..cb8b3f072 --- /dev/null +++ b/controller/config/samples/kustomization.yaml @@ -0,0 +1,7 @@ +## Append samples of your project ## +resources: +- v1alpha1_exporter.yaml +- v1alpha1_client.yaml +- v1alpha1_lease.yaml +- v1alpha1_exporteraccesspolicy.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/controller/config/samples/v1alpha1_client.yaml b/controller/config/samples/v1alpha1_client.yaml new file mode 100644 index 000000000..76c4adde8 --- /dev/null +++ b/controller/config/samples/v1alpha1_client.yaml @@ -0,0 +1,9 @@ +apiVersion: jumpstarter.dev/v1alpha1 +kind: Client +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + app.kubernetes.io/managed-by: kustomize + client-type: developer + name: client-sample +spec: {} diff --git a/controller/config/samples/v1alpha1_exporter.yaml b/controller/config/samples/v1alpha1_exporter.yaml new file mode 100644 index 000000000..55d414adc --- /dev/null +++ b/controller/config/samples/v1alpha1_exporter.yaml @@ -0,0 +1,8 @@ +apiVersion: jumpstarter.dev/v1alpha1 +kind: Exporter +metadata: + labels: + 
app.kubernetes.io/name: jumpstarter-router + dut: fancy-hardware + name: exporter-sample +spec: {} diff --git a/controller/config/samples/v1alpha1_exporteraccesspolicy.yaml b/controller/config/samples/v1alpha1_exporteraccesspolicy.yaml new file mode 100644 index 000000000..f14c0002b --- /dev/null +++ b/controller/config/samples/v1alpha1_exporteraccesspolicy.yaml @@ -0,0 +1,30 @@ +apiVersion: jumpstarter.dev/v1alpha1 +kind: ExporterAccessPolicy +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + app.kubernetes.io/managed-by: kustomize + name: default +spec: + exporterSelector: + matchLabels: + dut: fancy-hardware + policies: + - priority: 20 # Administrators come first, highest priority + from: + - clientSelector: + matchLabels: + client-type: administrator + - priority: 10 # Developers come next, maximum 2days + maximumDuration: 24h + from: + - clientSelector: + matchLabels: + client-type: developer + - priority: 5 # CI comes next, but only spot instances, can be deallocated + maximumDuration: 12h + spotAccess: true + from: + - clientSelector: + matchLabels: + client-type: ci diff --git a/controller/config/samples/v1alpha1_lease.yaml b/controller/config/samples/v1alpha1_lease.yaml new file mode 100644 index 000000000..a1e92b992 --- /dev/null +++ b/controller/config/samples/v1alpha1_lease.yaml @@ -0,0 +1,12 @@ +apiVersion: jumpstarter.dev/v1alpha1 +kind: Lease +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + app.kubernetes.io/managed-by: kustomize + name: lease-sample +spec: + clientRef: + name: client-sample + duration: 30s + selector: {} diff --git a/controller/deploy/helm/jumpstarter/.helmignore b/controller/deploy/helm/jumpstarter/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). 
Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/controller/deploy/helm/jumpstarter/Chart.yaml b/controller/deploy/helm/jumpstarter/Chart.yaml new file mode 100644 index 000000000..c6cbd9782 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +name: jumpstarter +description: A helm chart for the jumpstarter project +type: application +version: 0.1.0 +appVersion: "0.1.0" +dependencies: + - name: jumpstarter-controller + condition: jumpstarter-controller.enabled \ No newline at end of file diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/Chart.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/Chart.yaml new file mode 100644 index 000000000..3c297a894 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: jumpstarter-controller +description: A helm chart for jumpstarter-controller +type: application +version: 0.0.1 +appVersion: 0.0.1 + diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/model.py b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/model.py new file mode 100755 index 000000000..b724c6ef9 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/model.py @@ -0,0 +1,288 @@ +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.12" +# dependencies = ["pydantic"] +# /// + +from __future__ import annotations + +import json + +from enum import Enum +from typing import List, Optional, Union + +from pydantic import BaseModel, ConfigDict, Field, RootModel, conint + + +class Provisioning(BaseModel): + model_config = ConfigDict(extra="forbid") + + enabled: Optional[bool] = None + + +class Internal(BaseModel): + 
model_config = ConfigDict(extra="forbid") + + prefix: Optional[str] = None + + +class Keepalive(BaseModel): + model_config = ConfigDict(extra="forbid") + + minTime: Optional[str] = Field( + None, + description="The minimum amount of time a client should wait before sending a keepalive ping", + ) + permitWithoutStream: Optional[bool] = Field( + None, + description="Whether to allow keepalive pings even when there are no active streams(RPCs)", + ) + + +class Grpc(BaseModel): + model_config = ConfigDict(extra="forbid") + + keepalive: Optional[Keepalive] = None + + +class Metrics(BaseModel): + enabled: Optional[bool] = None + + +class Global(BaseModel): + baseDomain: Optional[str] = Field( + None, description="Base domain to construct the FQDN for the service endpoints" + ) + metrics: Optional[Metrics] = None + + +class Mode(Enum): + ingress = "ingress" + route = "route" + nodeport = "nodeport" + external = "external" # Disable ingress and route generation + + +class Mode1(Enum): + passthrough = "passthrough" + reencrypt = "reencrypt" + + +class Port(RootModel): + root: conint(ge=0, le=65535) + + +class Ingress(BaseModel): + model_config = ConfigDict(extra="forbid") + + enabled: Optional[bool] = Field( + None, description="Whether to enable Ingress for the gRPC endpoint" + ) + class_: Optional[str] = Field( + None, alias="class", description="IngressClass to use for the gRPC endpoint" + ) + + +class Route(BaseModel): + model_config = ConfigDict(extra="forbid") + + enabled: Optional[bool] = Field( + None, description="Whether to enable OpenShift Router for the gRPC endpoint" + ) + + +class PrefixedClaimOrExpression1(BaseModel): + model_config = ConfigDict(extra="forbid") + + claim: str + prefix: str + + +class PrefixedClaimOrExpression2(BaseModel): + model_config = ConfigDict(extra="forbid") + + expression: str + + +class PrefixedClaimOrExpression(RootModel): + root: Union[PrefixedClaimOrExpression1, PrefixedClaimOrExpression2] + + +class ClaimOrExpression1(BaseModel): 
+ model_config = ConfigDict(extra="forbid") + + claim: str + expression: Optional[str] = None + + +class ClaimOrExpression2(BaseModel): + model_config = ConfigDict(extra="forbid") + + claim: Optional[str] = None + expression: str + + +class ClaimOrExpression(RootModel): + root: Union[ClaimOrExpression1, ClaimOrExpression2] + + +class AudienceMatchPolicy(Enum): + MatchAny = "MatchAny" + + +class Issuer(BaseModel): + model_config = ConfigDict(extra="forbid") + + url: Optional[str] = None + discoveryURL: Optional[str] = None + certificateAuthority: Optional[str] = None + audiences: Optional[List[str]] = None + audienceMatchPolicy: Optional[AudienceMatchPolicy] = None + + +class ClaimValidationRule(BaseModel): + model_config = ConfigDict(extra="forbid") + + claim: Optional[str] = None + requiredValue: Optional[str] = None + expression: Optional[str] = None + message: Optional[str] = None + + +class ExtraItem(BaseModel): + model_config = ConfigDict(extra="forbid") + + key: Optional[str] = None + valueExpression: Optional[str] = None + + +class ClaimMappings(BaseModel): + model_config = ConfigDict(extra="forbid") + + username: Optional[PrefixedClaimOrExpression] = None + groups: Optional[PrefixedClaimOrExpression] = None + uid: Optional[ClaimOrExpression] = None + extra: Optional[List[ExtraItem]] = None + + +class UserValidationRule(BaseModel): + model_config = ConfigDict(extra="forbid") + + expression: Optional[str] = None + message: Optional[str] = None + + +class JWTAuthenticator(BaseModel): + model_config = ConfigDict(extra="forbid") + + issuer: Optional[Issuer] = None + claimValidationRules: Optional[List[ClaimValidationRule]] = None + claimMappings: Optional[ClaimMappings] = None + userValidationRules: Optional[List[UserValidationRule]] = None + + +class Authentication(BaseModel): + model_config = ConfigDict(extra="forbid") + + internal: Optional[Internal] = None + jwt: Optional[List[JWTAuthenticator]] = Field( + None, + description="External OIDC authentication, 
see https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration for documentation", + ) + + +class JumpstarterConfig(BaseModel): + model_config = ConfigDict(extra="forbid") + + provisioning: Optional[Provisioning] = None + authentication: Optional[Authentication] = None + grpc: Optional[Grpc] = None + + +class Nodeport(BaseModel): + model_config = ConfigDict(extra="forbid") + + enabled: Optional[bool] = None + port: Optional[Port] = None + routerPort: Optional[Port] = None + + +class Tls(BaseModel): + model_config = ConfigDict(extra="forbid") + + enabled: Optional[bool] = None + secret: Optional[str] = None + controllerCertSecret: Optional[str] = Field( + None, + description="Secret containing the TLS certificate/key for the gRPC controller endpoint", + ) + routerCertSecret: Optional[str] = Field( + None, + description="Secret containing the TLS certificate/key for the gRPC router endpoints", + ) + port: Optional[Port] = Field( + None, + description="Port to use for the gRPC endpoints Ingress or Route, this can be useful for ingress routers on non-standard ports", + ) + mode: Optional[Mode1] = Field(None, description="TLS mode for gRPC endpoints") + + +class Grpc1(BaseModel): + model_config = ConfigDict(extra="forbid") + + hostname: Optional[str] = Field( + None, description="Hostname for the controller to use for the controller gRPC" + ) + routerHostname: Optional[str] = Field( + None, description="Hostname for the router to use for the router gRPC" + ) + endpoint: Optional[str] = Field( + None, + description="The endpoints are passed down to the services to know where to announce the endpoints to the clients", + ) + routerEndpoint: Optional[str] = Field( + None, + description="The endpoints are passed down to the services to know where to announce the endpoints to the clients", + ) + additionalRouters: dict[str, Router] | None = Field( + None, description="Additional routers to deploy" + ) + ingress: 
Optional[Ingress] = None + route: Optional[Route] = None + nodeport: Optional[Nodeport] = None + mode: Optional[Mode] = None + tls: Optional[Tls] = None + + +class Router(BaseModel): + model_config = ConfigDict(extra="forbid") + hostname: str | None = None + endpoint: str | None = None + labels: dict[str, str] | None = None + nodeSelector: dict[str, str] | None = None + + +class Model(BaseModel): + model_config = ConfigDict(extra="forbid") + + enabled: Optional[bool] = Field( + None, description="Whether to enable jumpstarter controller" + ) + authenticationConfig: Optional[str] = None + config: Optional[JumpstarterConfig] = None + namespace: Optional[str] = Field( + None, + description="Namespace where the controller will be deployed, defaults to global.namespace", + ) + image: str = Field(..., description="Image for the controller") + tag: Optional[str] = Field(None, description="Image tag for the controller") + imagePullPolicy: str = Field( + ..., description="Image pull policy for the controller" + ) + global_: Optional[Global] = Field( + None, alias="global", description="Global parameters" + ) + grpc: Optional[Grpc1] = None + + +print(json.dumps(Model.model_json_schema(), indent=2)) diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/_endpoints.tpl b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/_endpoints.tpl new file mode 100644 index 000000000..a925e71cb --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/_endpoints.tpl @@ -0,0 +1,2 @@ +{{- define "router.endpoint" }}{{ if .Values.grpc.routerHostname }}{{ .Values.grpc.routerHostname }}{{ else }}router.{{ .Values.global.baseDomain | required "grpc.routerHostname or global.baseDomain must be set"}}{{ end }}{{- end }} +{{- define "controller.endpoint" }}{{ if .Values.grpc.hostname }}{{ .Values.grpc.hostname }}{{ else }}grpc.{{ .Values.global.baseDomain | required "grpc.hostname or global.baseDomain must 
be set"}}{{ end }}{{- end }} \ No newline at end of file diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-deployment.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-deployment.yaml new file mode 100644 index 000000000..49cc02726 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-deployment.yaml @@ -0,0 +1,111 @@ +{{ range $k, $v := .Values.grpc.additionalRouters }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jumpstarter-router-{{ $k }} + namespace: {{ default $.Release.Namespace $.Values.namespace }} + labels: + control-plane: controller-router-{{ $k }} + app.kubernetes.io/name: jumpstarter-controller + {{ if $.Values.global.timestamp }} + deployment.timestamp: {{ $.Values.global.timestamp | quote }} + {{ end }} + annotations: + argocd.argoproj.io/sync-wave: "1" +spec: + selector: + matchLabels: + control-plane: controller-router-{{ $k }} + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: router + configmap-sha256: {{ include (print $.Template.BasePath "/cms/controller-cm.yaml") $ | sha256sum }} + labels: + control-plane: controller-router-{{ $k }} + {{ if $.Values.global.timestamp }} + deployment.timestamp: {{ $.Values.global.timestamp | quote }} + {{ end }} + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. 
+ # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + {{ if $v.nodeSelector }} + nodeSelector: + {{ $v.nodeSelector | toYaml | indent 1 }} + {{ end }} + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - command: + - /router + env: + - name: GRPC_ROUTER_ENDPOINT + {{ if $v.endpoint }} + value: {{ $v.endpoint }} + {{ else if $v.hostname }} + value: {{ $v.hostname }}:{{ default 443 $.Values.grpc.tls.port }} + {{ else }} + value: router-{{ $k }}.{{ $.Values.global.baseDomain | required "set .global.baseDomain, or provide grpc.additionalRouters[...].endpoint/hostname" }}:{{ default 443 $.Values.grpc.tls.port }} + {{ end }} + - name: ROUTER_KEY + valueFrom: + secretKeyRef: + name: jumpstarter-router-secret + key: key + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + + image: {{ $.Values.image }}:{{ default $.Chart.AppVersion $.Values.tag }} + imagePullPolicy: {{ $.Values.imagePullPolicy }} + name: router + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + # livenessProbe: + # httpGet: + # path: /healthz + # port: 8081 + # initialDelaySeconds: 15 + # periodSeconds: 20 + # readinessProbe: + # httpGet: + # path: /readyz + # port: 8081 + # initialDelaySeconds: 5 + # periodSeconds: 10 + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 256Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 +{{ end }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-ingress.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-ingress.yaml new file mode 100644 index 
000000000..92522341e --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-ingress.yaml @@ -0,0 +1,47 @@ +{{ if eq .Values.grpc.mode "ingress" }} +{{ range $k, $v := .Values.grpc.additionalRouters }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/backend-protocol: "GRPC" + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + {{ if eq $.Values.grpc.tls.mode "passthrough" }} + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + {{ end }} + name: jumpstarter-router-ingress-{{ $k }} + namespace: {{ default $.Release.Namespace $.Values.namespace }} +spec: + {{ if $.Values.grpc.ingress.class }} + ingressClassName: {{ $.Values.grpc.ingress.class }} + {{ end }} + rules: + {{ if $v.hostname }} + - host: {{ $v.hostname }} + {{ else }} + - host: router-{{ $k }}.{{ $.Values.global.baseDomain | required "a global.baseDomain or a grpc.routerHostname must be provided"}} + {{ end }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: jumpstarter-router-grpc-{{ $k }} + port: + number: 8083 + tls: + - hosts: + {{ if $v.hostname }} + - {{ $v.hostname }} + {{ else }} + - router-{{ $k }}.{{ $.Values.global.baseDomain | required "a global.baseDomain or a grpc.routerHostname must be provided"}} + {{ end }} + {{ if $.Values.grpc.tls.routerCertSecret }} + secretName: {{ $.Values.grpc.tls.routerCertSecret }} + {{ end }} +{{ end }} +{{ end }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-route.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-route.yaml new file mode 100644 index 000000000..b001804a8 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-route.yaml @@ 
-0,0 +1,42 @@ +{{ if eq .Values.grpc.mode "route" }} +{{ range $k, $v := .Values.grpc.additionalRouters }} +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + labels: + external-exposed: "true" + shard: external + annotations: + haproxy.router.openshift.io/timeout: 2d + haproxy.router.openshift.io/timeout-tunnel: 2d + name: jumpstarter-router-route-{{ $k }} + namespace: {{ default $.Release.Namespace $.Values.namespace }} +spec: + {{ if $v.hostname }} + host: {{ $v.hostname }} + {{ else }} + host: router-{{ $k }}.{{ $.Values.global.baseDomain | required "a global.baseDomain or a grpc.routerHostname must be provided"}} + {{ end }} + port: + targetPort: 8083 + tls: + {{ if eq $.Values.grpc.tls.mode "passthrough" }} + termination: passthrough + {{ end }} + {{ if eq $.Values.grpc.tls.mode "reencrypt" }} + termination: reencrypt + {{ end }} + insecureEdgeTerminationPolicy: None + {{ if $.Values.grpc.tls.routerCertSecret }} + externalCertificate: + name: {{ $.Values.grpc.tls.routerCertSecret }} + {{ end }} + + to: + kind: Service + name: jumpstarter-router-grpc-{{ $k }} + weight: 100 + wildcardPolicy: None +{{ end }} +{{ end }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-service.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-service.yaml new file mode 100644 index 000000000..0b0de609b --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/additional-router-service.yaml @@ -0,0 +1,27 @@ +{{ range $k, $v := .Values.grpc.additionalRouters }} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-router-{{ $k }} + app.kubernetes.io/name: jumpstarter-controller + name: jumpstarter-router-grpc-{{ $k }} + namespace: {{ default $.Release.Namespace $.Values.namespace }} +spec: + {{ if $.Values.grpc.nodeport.enabled }} + type: NodePort + {{ end }} + + ports: + - name: grpc + port: 8083 
+ protocol: TCP + targetPort: 8083 + appProtocol: h2c # HTTP/2 over cleartext for gRPC (fixes edge termination in ingress/router) + {{ if $.Values.grpc.nodeport.enabled }} + nodePort: {{ $.Values.grpc.nodeport.routerPort }} + {{ end }} + selector: + control-plane: controller-router-{{ $k }} +{{ end }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/cms/controller-cm.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/cms/controller-cm.yaml new file mode 100644 index 000000000..7f75608d9 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/cms/controller-cm.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: jumpstarter-controller + namespace: {{ default .Release.Namespace .Values.namespace }} + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-controller + {{ if .Values.global.timestamp }} + deployment.timestamp: {{ .Values.global.timestamp | quote }} + {{ end }} +data: + # backwards compatibility + # TODO: remove in 0.7.0 + {{ if .Values.authenticationConfig }} + authentication: {{- .Values.authenticationConfig | toYaml | indent 1 }} + {{ end }} + config: | +{{ .Values.config | toYaml | indent 4 }} + router: | + default: + {{ if .Values.grpc.routerEndpoint }} + endpoint: {{ .Values.grpc.routerEndpoint }} + {{ else if .Values.grpc.routerHostname }} + endpoint: {{ .Values.grpc.routerHostname }}:{{ .Values.grpc.tls.port }} + {{ else }} + endpoint: router.{{ .Values.global.baseDomain }}:{{ .Values.grpc.tls.port }} + {{ end }} + {{ range $k, $v := .Values.grpc.additionalRouters }} + {{ $k }}: + {{ if $v.endpoint }} + endpoint: {{ $v.endpoint }} + {{ else if $v.hostname }} + endpoint: {{ $v.hostname }}:{{ $.Values.grpc.tls.port }} + {{ else }} + endpoint: router-{{ $k }}.{{ $.Values.global.baseDomain }}:{{ $.Values.grpc.tls.port }} + {{ end }} + {{ if $v.labels }} + labels: + {{ $v.labels | toYaml | indent 1 }} + {{ 
end }} + {{ end }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-deployment.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-deployment.yaml new file mode 100644 index 000000000..f7d5730c7 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-deployment.yaml @@ -0,0 +1,128 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jumpstarter-controller + namespace: {{ default .Release.Namespace .Values.namespace }} + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-controller + {{ if .Values.global.timestamp }} + deployment.timestamp: {{ .Values.global.timestamp | quote }} + {{ end }} + annotations: + argocd.argoproj.io/sync-wave: "1" +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + configmap-sha256: {{ include (print $.Template.BasePath "/cms/controller-cm.yaml") . | sha256sum }} + labels: + control-plane: controller-manager + {{ if .Values.global.timestamp }} + deployment.timestamp: {{ .Values.global.timestamp | quote }} + {{ end }} + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. 
+      # affinity:
+      #   nodeAffinity:
+      #     requiredDuringSchedulingIgnoredDuringExecution:
+      #       nodeSelectorTerms:
+      #         - matchExpressions:
+      #           - key: kubernetes.io/arch
+      #             operator: In
+      #             values:
+      #               - amd64
+      #               - arm64
+      #               - ppc64le
+      #               - s390x
+      #           - key: kubernetes.io/os
+      #             operator: In
+      #             values:
+      #               - linux
+      securityContext:
+        runAsNonRoot: true
+        seccompProfile:
+          type: RuntimeDefault
+      containers:
+      - args:
+        - --leader-elect
+        - --health-probe-bind-address=:8081
+        - --metrics-bind-address=:8080
+        env:
+        - name: GRPC_ENDPOINT
+          {{ if .Values.grpc.endpoint }}
+          value: {{ .Values.grpc.endpoint }}
+          {{ else if .Values.hostname }}
+          value: {{ .Values.hostname }}:{{ .Values.grpc.tls.port }}
+          {{ else }}
+          value: grpc.{{ .Values.global.baseDomain }}:{{ .Values.grpc.tls.port }}
+          {{ end }}
+        - name: CONTROLLER_KEY
+          valueFrom:
+            secretKeyRef:
+              name: jumpstarter-controller-secret
+              key: key
+        - name: ROUTER_KEY
+          valueFrom:
+            secretKeyRef:
+              name: jumpstarter-router-secret
+              key: key
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        {{- if .Values.grpc.tls.controllerCertSecret }}
+        - name: EXTERNAL_CERT_PEM
+          value: /secrets/tls.crt
+        - name: EXTERNAL_KEY_PEM
+          value: /secrets/tls.key
+        {{- end }}
+        image: {{ .Values.image }}:{{ default .Chart.AppVersion .Values.tag }}
+        imagePullPolicy: {{ .Values.imagePullPolicy }}
+        name: manager
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - "ALL"
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8081
+          initialDelaySeconds: 15
+          periodSeconds: 20
+        readinessProbe:
+          httpGet:
+            path: /readyz
+            port: 8081
+          initialDelaySeconds: 5
+          periodSeconds: 10
+        resources:
+          limits:
+            cpu: 2000m
+            memory: 1024Mi
+          requests:
+            cpu: 1000m
+            memory: 256Mi
+        {{- if .Values.grpc.tls.controllerCertSecret }}
+        volumeMounts:
+        - name: external-cert
+          mountPath: /secrets
+          readOnly: true
+        {{- end }}
+      {{- if .Values.grpc.tls.controllerCertSecret }}
+      volumes:
+      - name: external-cert
+       
secret: + secretName: {{ .Values.grpc.tls.controllerCertSecret }} + {{- end }} + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-ingress.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-ingress.yaml new file mode 100644 index 000000000..aba515c4b --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-ingress.yaml @@ -0,0 +1,44 @@ +{{ if eq .Values.grpc.mode "ingress" }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/backend-protocol: "GRPC" + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + {{ if eq .Values.grpc.tls.mode "passthrough" }} + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + {{ end }} + name: jumpstarter-controller-ingress + namespace: {{ default .Release.Namespace .Values.namespace }} +spec: + {{ if .Values.grpc.ingress.class }} + ingressClassName: {{ .Values.grpc.ingress.class }} + {{ end }} + rules: + {{ if .Values.grpc.hostname }} + - host: {{ .Values.grpc.hostname }} + {{ else }} + - host: grpc.{{ .Values.global.baseDomain | required "a global.baseDomain or a grpc.hostname must be provided"}} + {{ end }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: jumpstarter-grpc + port: + number: 8082 + tls: + - hosts: + {{ if .Values.grpc.hostname }} + - {{ .Values.grpc.hostname }} + {{ else }} + - grpc.{{ .Values.global.baseDomain | required "a global.baseDomain or a grpc.hostname must be provided"}} + {{ end }} + {{ if .Values.grpc.tls.controllerCertSecret }} + secretName: {{ .Values.grpc.tls.controllerCertSecret }} + {{ end }} +{{ end }} diff --git 
a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-route.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-route.yaml new file mode 100644 index 000000000..f7a781f0d --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-route.yaml @@ -0,0 +1,34 @@ +{{ if eq .Values.grpc.mode "route" }} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + labels: + external-exposed: "true" + shard: external + annotations: + haproxy.router.openshift.io/timeout: 2d + haproxy.router.openshift.io/timeout-tunnel: 2d + name: jumpstarter-controller-route + namespace: {{ default .Release.Namespace .Values.namespace }} +spec: + {{ if .Values.grpc.hostname }} + host: {{ .Values.grpc.hostname }} + {{ else }} + host: grpc.{{ .Values.global.baseDomain | required "a global.baseDomain or a grpc.hostname must be provided"}} + {{ end }} + port: + targetPort: 8082 + tls: + termination: {{ .Values.grpc.tls.mode }} + insecureEdgeTerminationPolicy: None + {{ if .Values.grpc.tls.controllerCertSecret }} + externalCertificate: + name: {{ .Values.grpc.tls.controllerCertSecret }} + {{ end }} + + to: + kind: Service + name: jumpstarter-grpc + weight: 100 + wildcardPolicy: None +{{ end }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-service.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-service.yaml new file mode 100644 index 000000000..c75803241 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/controller-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-controller + name: jumpstarter-grpc + namespace: {{ default .Release.Namespace .Values.namespace }} +spec: + {{ if .Values.grpc.nodeport.enabled }} + type: NodePort + {{ end 
}} + + ports: + - name: grpc + port: 8082 + protocol: TCP + targetPort: 8082 + appProtocol: h2c # HTTP/2 over cleartext for gRPC (fixes edge termination in ingress/router) + {{ if .Values.grpc.nodeport.enabled }} + nodePort: {{ .Values.grpc.nodeport.port }} + {{ end }} + selector: + control-plane: controller-manager diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/metrics/metrics_service.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/metrics/metrics_service.yaml new file mode 100644 index 000000000..b16a19ffc --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/metrics/metrics_service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-controller + name: controller-manager-metrics-service + namespace: {{ default .Release.Namespace .Values.namespace }} +spec: + ports: + - name: http + port: 8080 + protocol: TCP + targetPort: 8080 + selector: + control-plane: controller-manager diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/metrics/monitor.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/metrics/monitor.yaml new file mode 100644 index 000000000..cb8098a7c --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/metrics/monitor.yaml @@ -0,0 +1,20 @@ +# enable monitoring only if monitoring is enabled +{{- if .Values.global.metrics.enabled }} +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-controller + name: controller-manager-metrics-monitor + namespace: {{ default .Release.Namespace .Values.namespace }} +spec: + endpoints: + - path: /metrics + port: http # Ensure this is the name of the port that exposes 
HTTP metrics + scheme: http + selector: + matchLabels: + control-plane: controller-manager +{{- end }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/client_editor_role.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/client_editor_role.yaml new file mode 100644 index 000000000..94773c031 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/client_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions for end users to edit identities. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + name: client-editor-role +rules: +- apiGroups: + - jumpstarter.dev + resources: + - clients + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - identities/status + verbs: + - get diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/client_viewer_role.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/client_viewer_role.yaml new file mode 100644 index 000000000..039ea624d --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/client_viewer_role.yaml @@ -0,0 +1,22 @@ +# permissions for end users to view identities. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + name: client-viewer-role +rules: +- apiGroups: + - jumpstarter.dev + resources: + - clients + verbs: + - get + - list + - watch +- apiGroups: + - jumpstarter.dev + resources: + - identities/status + verbs: + - get diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/exporter_editor_role.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/exporter_editor_role.yaml new file mode 100644 index 000000000..c52b8c246 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/exporter_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions for end users to edit exporters. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + name: exporter-editor-role +rules: +- apiGroups: + - jumpstarter.dev + resources: + - exporters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - exporters/status + verbs: + - get diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/exporter_viewer_role.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/exporter_viewer_role.yaml new file mode 100644 index 000000000..5ba5614c5 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/exporter_viewer_role.yaml @@ -0,0 +1,22 @@ +# permissions for end users to view exporters. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + name: exporter-viewer-role +rules: +- apiGroups: + - jumpstarter.dev + resources: + - exporters + verbs: + - get + - list + - watch +- apiGroups: + - jumpstarter.dev + resources: + - exporters/status + verbs: + - get diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/leader_election_role.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/leader_election_role.yaml new file mode 100644 index 000000000..b0390bd15 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + name: leader-election-role + namespace: {{ default .Release.Namespace .Values.namespace }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/leader_election_role_binding.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/leader_election_role_binding.yaml new file mode 100644 index 000000000..d60dc3c9f --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + namespace: {{ default .Release.Namespace .Values.namespace }} 
+ name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: {{ default .Release.Namespace .Values.namespace }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/role.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/role.yaml new file mode 100644 index 000000000..751680e1f --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/role.yaml @@ -0,0 +1,60 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: jumpstarter-manager-role + annotations: + argocd.argoproj.io/sync-wave: "-1" +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - clients + - exporters + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - clients/finalizers + - exporters/finalizers + - leases/finalizers + verbs: + - update +- apiGroups: + - jumpstarter.dev + resources: + - clients/status + - exporters/status + - leases/status + verbs: + - get + - patch + - update +- apiGroups: + - jumpstarter.dev + resources: + - exporteraccesspolicies + verbs: + - get + - list + - watch diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/role_binding.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/role_binding.yaml new file mode 100644 index 000000000..71d864b05 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/role_binding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + 
annotations: + argocd.argoproj.io/sync-wave: "-1" + name: jumpstarter-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: jumpstarter-manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: {{ default .Release.Namespace .Values.namespace }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/service_account.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/service_account.yaml new file mode 100644 index 000000000..5359726af --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/rbac/service_account.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: jumpstarter-router + annotations: + argocd.argoproj.io/sync-wave: "-1" + name: controller-manager + namespace: {{ default .Release.Namespace .Values.namespace }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-deployment.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-deployment.yaml new file mode 100644 index 000000000..fa9978bf3 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-deployment.yaml @@ -0,0 +1,109 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jumpstarter-router + namespace: {{ default .Release.Namespace .Values.namespace }} + labels: + control-plane: controller-router + app.kubernetes.io/name: jumpstarter-controller + {{ if .Values.global.timestamp }} + deployment.timestamp: {{ .Values.global.timestamp | quote }} + {{ end }} + annotations: + argocd.argoproj.io/sync-wave: "1" +spec: + selector: + matchLabels: + control-plane: controller-router + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: router + configmap-sha256: {{ include (print $.Template.BasePath 
"/cms/controller-cm.yaml") . | sha256sum }} + labels: + control-plane: controller-router + {{ if .Values.global.timestamp }} + deployment.timestamp: {{ .Values.global.timestamp | quote }} + {{ end }} + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - command: + - /router + env: + - name: GRPC_ROUTER_ENDPOINT + {{ if .Values.grpc.routerEndpoint }} + value: {{ .Values.grpc.routerEndpoint }} + {{ else if .Values.routerHostname }} + value: {{ .Values.routerHostname }}:{{ .Values.grpc.tls.port }} + {{ else }} + value: router.{{ .Values.global.baseDomain }}:{{ .Values.grpc.tls.port }} + {{ end }} + - name: ROUTER_KEY + valueFrom: + secretKeyRef: + name: jumpstarter-router-secret + key: key + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.grpc.tls.routerCertSecret }} + - name: EXTERNAL_CERT_PEM + value: /secrets/tls.crt + - name: EXTERNAL_KEY_PEM + value: /secrets/tls.key + {{- end }} + image: {{ .Values.image }}:{{ default .Chart.AppVersion .Values.tag }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + name: router + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 256Mi + {{- if .Values.grpc.tls.routerCertSecret }} + volumeMounts: + - name: 
external-cert + mountPath: /secrets + readOnly: true + {{- end }} + {{- if .Values.grpc.tls.routerCertSecret }} + volumes: + - name: external-cert + secret: + secretName: {{ .Values.grpc.tls.routerCertSecret }} + {{- end }} + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-ingress.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-ingress.yaml new file mode 100644 index 000000000..57e2235e5 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-ingress.yaml @@ -0,0 +1,44 @@ +{{ if eq .Values.grpc.mode "ingress" }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/backend-protocol: "GRPC" + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + {{ if eq .Values.grpc.tls.mode "passthrough" }} + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + {{ end }} + name: jumpstarter-router-ingress + namespace: {{ default .Release.Namespace .Values.namespace }} +spec: + {{ if .Values.grpc.ingress.class }} + ingressClassName: {{ .Values.grpc.ingress.class }} + {{ end }} + rules: + {{ if .Values.grpc.routerHostname }} + - host: {{ .Values.grpc.routerHostname }} + {{ else }} + - host: router.{{ .Values.global.baseDomain | required "a global.baseDomain or a grpc.routerHostname must be provided"}} + {{ end }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: jumpstarter-router-grpc + port: + number: 8083 + tls: + - hosts: + {{ if .Values.grpc.routerHostname }} + - {{ .Values.grpc.routerHostname }} + {{ else }} + - router.{{ .Values.global.baseDomain | required "a global.baseDomain or a grpc.routerHostname must be provided" }} + {{ end }} + {{ if .Values.grpc.tls.routerCertSecret }} + 
secretName: {{ .Values.grpc.tls.routerCertSecret }} + {{ end }} +{{ end }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-route.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-route.yaml new file mode 100644 index 000000000..e0659fbe8 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-route.yaml @@ -0,0 +1,39 @@ +{{ if eq .Values.grpc.mode "route" }} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + labels: + external-exposed: "true" + shard: external + annotations: + haproxy.router.openshift.io/timeout: 2d + haproxy.router.openshift.io/timeout-tunnel: 2d + name: jumpstarter-router-route + namespace: {{ default .Release.Namespace .Values.namespace }} +spec: + {{ if .Values.grpc.routerHostname }} + host: {{ .Values.grpc.routerHostname }} + {{ else }} + host: router.{{ .Values.global.baseDomain | required "a global.baseDomain or a grpc.routerHostname must be provided"}} + {{ end }} + port: + targetPort: 8083 + tls: + {{ if eq .Values.grpc.tls.mode "passthrough" }} + termination: passthrough + {{ end }} + {{ if eq .Values.grpc.tls.mode "reencrypt" }} + termination: reencrypt + {{ end }} + insecureEdgeTerminationPolicy: None + {{ if .Values.grpc.tls.routerCertSecret }} + externalCertificate: + name: {{ .Values.grpc.tls.routerCertSecret }} + {{ end }} + + to: + kind: Service + name: jumpstarter-router-grpc + weight: 100 + wildcardPolicy: None +{{ end }} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-service.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-service.yaml new file mode 100644 index 000000000..ad4a2b1b3 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/router-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + 
app.kubernetes.io/name: jumpstarter-controller + name: jumpstarter-router-grpc + namespace: {{ default .Release.Namespace .Values.namespace }} +spec: + {{ if .Values.grpc.nodeport.enabled }} + type: NodePort + {{ end }} + + ports: + - name: grpc + port: 8083 + protocol: TCP + targetPort: 8083 + appProtocol: h2c # HTTP/2 over cleartext for gRPC (fixes edge termination in ingress/router) + {{ if .Values.grpc.nodeport.enabled }} + nodePort: {{ .Values.grpc.nodeport.routerPort }} + {{ end }} + selector: + control-plane: controller-router diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/secrets-job.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/secrets-job.yaml new file mode 100644 index 000000000..309d16fc9 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/templates/secrets-job.yaml @@ -0,0 +1,36 @@ +{{- $namespace := default .Release.Namespace .Values.namespace }} +apiVersion: batch/v1 +kind: Job +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-controller + annotations: + # https://argo-cd.readthedocs.io/en/stable/user-guide/resource_hooks/#hook-deletion-policies + argocd.argoproj.io/hook: Sync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + argocd.argoproj.io/sync-wave: "-1" + name: jumpstarter-secrets + namespace: {{ $namespace }} +spec: + ttlSecondsAfterFinished: 30 + template: + metadata: + name: jumpstarter-secrets + spec: + serviceAccountName: controller-manager + containers: + - name: jumpstarter-secrets + image: quay.io/jumpstarter-dev/jumpstarter-utils:latest + command: + - /bin/sh + - -c + - | + set -e + {{- range $name := tuple "jumpstarter-router-secret" "jumpstarter-controller-secret" }} + if ! 
kubectl get secret {{ $name }} -n {{ $namespace }} >/dev/null 2>&1; then + kubectl create secret generic {{ $name }} -n={{ $namespace }} \ + --from-literal=key="$(openssl rand -hex 32)" + fi + {{- end }} + restartPolicy: Never diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/values.schema.json b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/values.schema.json new file mode 100644 index 000000000..091b378a7 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/values.schema.json @@ -0,0 +1,1123 @@ +{ + "$defs": { + "AudienceMatchPolicy": { + "enum": [ + "MatchAny" + ], + "title": "AudienceMatchPolicy", + "type": "string" + }, + "Authentication": { + "additionalProperties": false, + "properties": { + "internal": { + "anyOf": [ + { + "$ref": "#/$defs/Internal" + }, + { + "type": "null" + } + ], + "default": null + }, + "jwt": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/JWTAuthenticator" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "External OIDC authentication, see https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration for documentation", + "title": "Jwt" + } + }, + "title": "Authentication", + "type": "object" + }, + "ClaimMappings": { + "additionalProperties": false, + "properties": { + "username": { + "anyOf": [ + { + "$ref": "#/$defs/PrefixedClaimOrExpression" + }, + { + "type": "null" + } + ], + "default": null + }, + "groups": { + "anyOf": [ + { + "$ref": "#/$defs/PrefixedClaimOrExpression" + }, + { + "type": "null" + } + ], + "default": null + }, + "uid": { + "anyOf": [ + { + "$ref": "#/$defs/ClaimOrExpression" + }, + { + "type": "null" + } + ], + "default": null + }, + "extra": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ExtraItem" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Extra" + } + }, + "title": "ClaimMappings", + 
"type": "object" + }, + "ClaimOrExpression": { + "anyOf": [ + { + "$ref": "#/$defs/ClaimOrExpression1" + }, + { + "$ref": "#/$defs/ClaimOrExpression2" + } + ], + "title": "ClaimOrExpression" + }, + "ClaimOrExpression1": { + "additionalProperties": false, + "properties": { + "claim": { + "title": "Claim", + "type": "string" + }, + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Expression" + } + }, + "required": [ + "claim" + ], + "title": "ClaimOrExpression1", + "type": "object" + }, + "ClaimOrExpression2": { + "additionalProperties": false, + "properties": { + "claim": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Claim" + }, + "expression": { + "title": "Expression", + "type": "string" + } + }, + "required": [ + "expression" + ], + "title": "ClaimOrExpression2", + "type": "object" + }, + "ClaimValidationRule": { + "additionalProperties": false, + "properties": { + "claim": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Claim" + }, + "requiredValue": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Requiredvalue" + }, + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Expression" + }, + "message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Message" + } + }, + "title": "ClaimValidationRule", + "type": "object" + }, + "ExtraItem": { + "additionalProperties": false, + "properties": { + "key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Key" + }, + "valueExpression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Valueexpression" + } + }, + "title": "ExtraItem", + "type": "object" + }, + 
"Global": { + "properties": { + "baseDomain": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base domain to construct the FQDN for the service endpoints", + "title": "Basedomain" + }, + "metrics": { + "anyOf": [ + { + "$ref": "#/$defs/Metrics" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "title": "Global", + "type": "object" + }, + "Grpc": { + "additionalProperties": false, + "properties": { + "keepalive": { + "anyOf": [ + { + "$ref": "#/$defs/Keepalive" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "title": "Grpc", + "type": "object" + }, + "Grpc1": { + "additionalProperties": false, + "properties": { + "hostname": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Hostname for the controller to use for the controller gRPC", + "title": "Hostname" + }, + "routerHostname": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Hostname for the router to use for the router gRPC", + "title": "Routerhostname" + }, + "endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The endpoints are passed down to the services to know where to announce the endpoints to the clients", + "title": "Endpoint" + }, + "routerEndpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The endpoints are passed down to the services to know where to announce the endpoints to the clients", + "title": "Routerendpoint" + }, + "additionalRouters": { + "anyOf": [ + { + "additionalProperties": { + "$ref": "#/$defs/Router" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional routers to deploy", + "title": "Additionalrouters" + }, + "ingress": { + "anyOf": [ + { + "$ref": "#/$defs/Ingress" + }, + { + "type": 
"null" + } + ], + "default": null + }, + "route": { + "anyOf": [ + { + "$ref": "#/$defs/Route" + }, + { + "type": "null" + } + ], + "default": null + }, + "nodeport": { + "anyOf": [ + { + "$ref": "#/$defs/Nodeport" + }, + { + "type": "null" + } + ], + "default": null + }, + "mode": { + "anyOf": [ + { + "$ref": "#/$defs/Mode" + }, + { + "type": "null" + } + ], + "default": null + }, + "tls": { + "anyOf": [ + { + "$ref": "#/$defs/Tls" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "title": "Grpc1", + "type": "object" + }, + "Ingress": { + "additionalProperties": false, + "properties": { + "enabled": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to enable Ingress for the gRPC endpoint", + "title": "Enabled" + }, + "class": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "IngressClass to use for the gRPC endpoint", + "title": "Class" + } + }, + "title": "Ingress", + "type": "object" + }, + "Internal": { + "additionalProperties": false, + "properties": { + "prefix": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Prefix" + } + }, + "title": "Internal", + "type": "object" + }, + "Issuer": { + "additionalProperties": false, + "properties": { + "url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Url" + }, + "discoveryURL": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Discoveryurl" + }, + "certificateAuthority": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Certificateauthority" + }, + "audiences": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Audiences" + }, + "audienceMatchPolicy": { + "anyOf": [ + { + 
"$ref": "#/$defs/AudienceMatchPolicy" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "title": "Issuer", + "type": "object" + }, + "JWTAuthenticator": { + "additionalProperties": false, + "properties": { + "issuer": { + "anyOf": [ + { + "$ref": "#/$defs/Issuer" + }, + { + "type": "null" + } + ], + "default": null + }, + "claimValidationRules": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ClaimValidationRule" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Claimvalidationrules" + }, + "claimMappings": { + "anyOf": [ + { + "$ref": "#/$defs/ClaimMappings" + }, + { + "type": "null" + } + ], + "default": null + }, + "userValidationRules": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/UserValidationRule" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Uservalidationrules" + } + }, + "title": "JWTAuthenticator", + "type": "object" + }, + "JumpstarterConfig": { + "additionalProperties": false, + "properties": { + "provisioning": { + "anyOf": [ + { + "$ref": "#/$defs/Provisioning" + }, + { + "type": "null" + } + ], + "default": null + }, + "authentication": { + "anyOf": [ + { + "$ref": "#/$defs/Authentication" + }, + { + "type": "null" + } + ], + "default": null + }, + "grpc": { + "anyOf": [ + { + "$ref": "#/$defs/Grpc" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "title": "JumpstarterConfig", + "type": "object" + }, + "Keepalive": { + "additionalProperties": false, + "properties": { + "minTime": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The minimum amount of time a client should wait before sending a keepalive ping", + "title": "Mintime" + }, + "permitWithoutStream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to allow keepalive pings even when there are no active streams(RPCs)", + "title": 
"Permitwithoutstream" + } + }, + "title": "Keepalive", + "type": "object" + }, + "Metrics": { + "properties": { + "enabled": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Enabled" + } + }, + "title": "Metrics", + "type": "object" + }, + "Mode": { + "enum": [ + "ingress", + "route", + "nodeport", + "external" + ], + "title": "Mode", + "type": "string" + }, + "Mode1": { + "enum": [ + "passthrough", + "reencrypt" + ], + "title": "Mode1", + "type": "string" + }, + "Nodeport": { + "additionalProperties": false, + "properties": { + "enabled": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Enabled" + }, + "port": { + "anyOf": [ + { + "$ref": "#/$defs/Port" + }, + { + "type": "null" + } + ], + "default": null + }, + "routerPort": { + "anyOf": [ + { + "$ref": "#/$defs/Port" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "title": "Nodeport", + "type": "object" + }, + "Port": { + "maximum": 65535, + "minimum": 0, + "title": "Port", + "type": "integer" + }, + "PrefixedClaimOrExpression": { + "anyOf": [ + { + "$ref": "#/$defs/PrefixedClaimOrExpression1" + }, + { + "$ref": "#/$defs/PrefixedClaimOrExpression2" + } + ], + "title": "PrefixedClaimOrExpression" + }, + "PrefixedClaimOrExpression1": { + "additionalProperties": false, + "properties": { + "claim": { + "title": "Claim", + "type": "string" + }, + "prefix": { + "title": "Prefix", + "type": "string" + } + }, + "required": [ + "claim", + "prefix" + ], + "title": "PrefixedClaimOrExpression1", + "type": "object" + }, + "PrefixedClaimOrExpression2": { + "additionalProperties": false, + "properties": { + "expression": { + "title": "Expression", + "type": "string" + } + }, + "required": [ + "expression" + ], + "title": "PrefixedClaimOrExpression2", + "type": "object" + }, + "Provisioning": { + "additionalProperties": false, + "properties": { + "enabled": { + "anyOf": [ + { + "type": "boolean" + }, + 
{ + "type": "null" + } + ], + "default": null, + "title": "Enabled" + } + }, + "title": "Provisioning", + "type": "object" + }, + "Route": { + "additionalProperties": false, + "properties": { + "enabled": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to enable OpenShift Router for the gRPC endpoint", + "title": "Enabled" + } + }, + "title": "Route", + "type": "object" + }, + "Router": { + "additionalProperties": false, + "properties": { + "hostname": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Hostname" + }, + "endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Endpoint" + }, + "labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Labels" + }, + "nodeSelector": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Nodeselector" + } + }, + "title": "Router", + "type": "object" + }, + "Tls": { + "additionalProperties": false, + "properties": { + "enabled": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Enabled" + }, + "secret": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Secret" + }, + "controllerCertSecret": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Secret containing the TLS certificate/key for the gRPC controller endpoint", + "title": "Controllercertsecret" + }, + "routerCertSecret": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Secret containing the TLS certificate/key for the gRPC router endpoints", + "title": 
"Routercertsecret" + }, + "port": { + "anyOf": [ + { + "$ref": "#/$defs/Port" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Port to use for the gRPC endpoints Ingress or Route, this can be useful for ingress routers on non-standard ports" + }, + "mode": { + "anyOf": [ + { + "$ref": "#/$defs/Mode1" + }, + { + "type": "null" + } + ], + "default": null, + "description": "TLS mode for gRPC endpoints" + } + }, + "title": "Tls", + "type": "object" + }, + "UserValidationRule": { + "additionalProperties": false, + "properties": { + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Expression" + }, + "message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Message" + } + }, + "title": "UserValidationRule", + "type": "object" + } + }, + "additionalProperties": false, + "properties": { + "enabled": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to enable jumpstarter controller", + "title": "Enabled" + }, + "authenticationConfig": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Authenticationconfig" + }, + "config": { + "anyOf": [ + { + "$ref": "#/$defs/JumpstarterConfig" + }, + { + "type": "null" + } + ], + "default": null + }, + "namespace": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Namespace where the controller will be deployed, defaults to global.namespace", + "title": "Namespace" + }, + "image": { + "description": "Image for the controller", + "title": "Image", + "type": "string" + }, + "tag": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Image tag for the controller", + "title": "Tag" + }, + "imagePullPolicy": { + "description": "Image pull policy for the 
controller", + "title": "Imagepullpolicy", + "type": "string" + }, + "global": { + "anyOf": [ + { + "$ref": "#/$defs/Global" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Global parameters" + }, + "grpc": { + "anyOf": [ + { + "$ref": "#/$defs/Grpc1" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "required": [ + "image", + "imagePullPolicy" + ], + "title": "Model", + "type": "object" +} diff --git a/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/values.yaml b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/values.yaml new file mode 100644 index 000000000..821d701b6 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/charts/jumpstarter-controller/values.yaml @@ -0,0 +1,36 @@ + +namespace: "" + +grpc: + hostname: "" + routerHostname: "" + + # the endpoints are passed down to the service to know where + # to announce the endpoints to the clients + endpoint: "" + routerEndpoint: "" + + additionalRouters: {} + + tls: + enabled: false + secret: "" + + # enabling ingress route + ingress: + enabled: false + class: "" + + # enabling openshift route + route: + enabled: false + + # NodePort service for grpc, useful for local development + nodeport: + enabled: false + port: 30010 + routerPort: 30011 + +image: quay.io/jumpstarter-dev/jumpstarter-controller +tag: "" +imagePullPolicy: IfNotPresent diff --git a/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_clients.yaml b/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_clients.yaml new file mode 100644 index 000000000..d9dd6d0cb --- /dev/null +++ b/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_clients.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: clients.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Client + listKind: ClientList + plural: clients + singular: client + scope: 
Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Client is the Schema for the identities API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClientSpec defines the desired state of Identity + properties: + username: + type: string + type: object + status: + description: ClientStatus defines the observed state of Identity + properties: + credential: + description: Status field for the clients + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + endpoint: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_exporteraccesspolicies.yaml b/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_exporteraccesspolicies.yaml new file mode 100644 index 000000000..ec1b7878c --- /dev/null +++ b/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_exporteraccesspolicies.yaml @@ -0,0 +1,166 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: exporteraccesspolicies.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: ExporterAccessPolicy + listKind: ExporterAccessPolicyList + plural: exporteraccesspolicies + singular: exporteraccesspolicy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ExporterAccessPolicy is the Schema for the exporteraccesspolicies + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExporterAccessPolicySpec defines the desired state of ExporterAccessPolicy. 
+ properties: + exporterSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + policies: + items: + properties: + from: + items: + properties: + clientSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + maximumDuration: + type: string + priority: + type: integer + spotAccess: + type: boolean + type: object + type: array + type: object + status: + description: ExporterAccessPolicyStatus defines the observed state of + ExporterAccessPolicy. 
+ type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_exporters.yaml b/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_exporters.yaml new file mode 100644 index 000000000..931c28b07 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_exporters.yaml @@ -0,0 +1,160 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: exporters.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Exporter + listKind: ExporterList + plural: exporters + singular: exporter + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Exporter is the Schema for the exporters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExporterSpec defines the desired state of Exporter + properties: + username: + type: string + type: object + status: + description: ExporterStatus defines the observed state of Exporter + properties: + conditions: + description: Exporter status fields + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + credential: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + devices: + items: + properties: + labels: + additionalProperties: + type: string + type: object + parent_uuid: + type: string + uuid: + type: string + type: object + type: array + endpoint: + type: string + lastSeen: + format: date-time + type: string + leaseRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_leases.yaml b/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_leases.yaml new file mode 100644 index 000000000..9aafc8591 --- /dev/null +++ b/controller/deploy/helm/jumpstarter/crds/jumpstarter.dev_leases.yaml @@ -0,0 +1,235 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: leases.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Lease + listKind: LeaseList + plural: leases + singular: lease + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ended + name: Ended + type: boolean + - jsonPath: .spec.clientRef.name + name: Client + type: string + - jsonPath: .status.exporterRef.name + name: Exporter + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Lease is the Schema for the exporters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LeaseSpec defines the desired state of Lease + properties: + beginTime: + description: |- + Requested start time. If omitted, lease starts when exporter is acquired. + Immutable after lease starts (cannot change the past). + format: date-time + type: string + clientRef: + description: The client that is requesting the lease + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + duration: + description: |- + Duration of the lease. Must be positive when provided. + Can be omitted (nil) when both BeginTime and EndTime are provided, + in which case it's calculated as EndTime - BeginTime. + type: string + endTime: + description: |- + Requested end time. If specified with BeginTime, Duration is calculated. + Can be updated to extend or shorten active leases. + format: date-time + type: string + release: + description: The release flag requests the controller to end the lease + now + type: boolean + selector: + description: The selector for the exporter to be used + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - clientRef + - selector + type: object + status: + description: LeaseStatus defines the observed state of Lease + properties: + beginTime: + description: |- + If the lease has been acquired an exporter name is assigned + and then it can be used, it will be empty while still pending + format: date-time + type: string + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + endTime: + format: date-time + type: string + ended: + type: boolean + exporterRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + priority: + type: integer + spotAccess: + type: boolean + required: + - ended + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/deploy/helm/jumpstarter/model.py b/controller/deploy/helm/jumpstarter/model.py new file mode 100755 index 000000000..a44006dcd --- /dev/null +++ b/controller/deploy/helm/jumpstarter/model.py @@ -0,0 +1,55 @@ +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.12" +# dependencies = ["pydantic"] +# /// + +from __future__ import annotations + +import json + +from typing import Any, Dict, Optional, Union + +from pydantic import BaseModel, ConfigDict, Field + + +class Metrics(BaseModel): + model_config = ConfigDict(extra="forbid") + + enabled: Optional[bool] = Field( + None, description="Whether to enable metrics exporting and service" + ) + + +class Global(BaseModel): + model_config = ConfigDict(extra="forbid") + + namespace: Optional[str] = Field( + None, description="Namespace where the components will be deployed" + ) + timestamp: Optional[Union[int, str]] = Field( + None, + description="Timestamp to be used to trigger a new deployment, i.e. 
if you want pods to be restarted and pickup the latest tag",
+    )
+    baseDomain: Optional[str] = Field(
+        None, description="Base domain to construct the FQDN for the service endpoints"
+    )
+    storageClassName: Optional[str] = Field(
+        None, description="Storage class name for the PVCs"
+    )
+    storageClassNameRWM: Optional[str] = Field(
+        None, description="Storage class name for multiple reader/writer PVC"
+    )
+    metrics: Optional[Metrics] = None
+
+
+class Model(BaseModel):
+    model_config = ConfigDict(extra="forbid")
+
+    jumpstarter_controller: Optional[Dict[str, Any]] = Field(
+        None, alias="jumpstarter-controller"
+    )
+    global_: Optional[Global] = Field(None, alias="global")
+
+
+print(json.dumps(Model.model_json_schema(), indent=2))
diff --git a/controller/deploy/helm/jumpstarter/values.kind.yaml b/controller/deploy/helm/jumpstarter/values.kind.yaml
new file mode 100644
index 000000000..d95f0283e
--- /dev/null
+++ b/controller/deploy/helm/jumpstarter/values.kind.yaml
@@ -0,0 +1,10 @@
+global:
+  baseDomain: jumpstarter.127.0.0.1.nip.io
+  metrics:
+    enabled: false
+
+jumpstarter-controller:
+  # image: quay.io/jumpstarter-dev/jumpstarter-controller
+  # tag: latest
+  grpc:
+    mode: "ingress"
diff --git a/controller/deploy/helm/jumpstarter/values.schema.json b/controller/deploy/helm/jumpstarter/values.schema.json
new file mode 100644
index 000000000..8fe065c03
--- /dev/null
+++ b/controller/deploy/helm/jumpstarter/values.schema.json
@@ -0,0 +1,139 @@
+{
+  "$defs": {
+    "Global": {
+      "additionalProperties": false,
+      "properties": {
+        "namespace": {
+          "anyOf": [
+            {
+              "type": "string"
+            },
+            {
+              "type": "null"
+            }
+          ],
+          "default": null,
+          "description": "Namespace where the components will be deployed",
+          "title": "Namespace"
+        },
+        "timestamp": {
+          "anyOf": [
+            {
+              "type": "integer"
+            },
+            {
+              "type": "string"
+            },
+            {
+              "type": "null"
+            }
+          ],
+          "default": null,
+          "description": "Timestamp to be used to trigger a new deployment, i.e. if you want pods to be restarted and pickup the latest tag",
+          "title": "Timestamp"
+        },
+        "baseDomain": {
+          "anyOf": [
+            {
+              "type": "string"
+            },
+            {
+              "type": "null"
+            }
+          ],
+          "default": null,
+          "description": "Base domain to construct the FQDN for the service endpoints",
+          "title": "Basedomain"
+        },
+        "storageClassName": {
+          "anyOf": [
+            {
+              "type": "string"
+            },
+            {
+              "type": "null"
+            }
+          ],
+          "default": null,
+          "description": "Storage class name for the PVCs",
+          "title": "Storageclassname"
+        },
+        "storageClassNameRWM": {
+          "anyOf": [
+            {
+              "type": "string"
+            },
+            {
+              "type": "null"
+            }
+          ],
+          "default": null,
+          "description": "Storage class name for multiple reader/writer PVC",
+          "title": "Storageclassnamerwm"
+        },
+        "metrics": {
+          "anyOf": [
+            {
+              "$ref": "#/$defs/Metrics"
+            },
+            {
+              "type": "null"
+            }
+          ],
+          "default": null
+        }
+      },
+      "title": "Global",
+      "type": "object"
+    },
+    "Metrics": {
+      "additionalProperties": false,
+      "properties": {
+        "enabled": {
+          "anyOf": [
+            {
+              "type": "boolean"
+            },
+            {
+              "type": "null"
+            }
+          ],
+          "default": null,
+          "description": "Whether to enable metrics exporting and service",
+          "title": "Enabled"
+        }
+      },
+      "title": "Metrics",
+      "type": "object"
+    }
+  },
+  "additionalProperties": false,
+  "properties": {
+    "jumpstarter-controller": {
+      "anyOf": [
+        {
+          "additionalProperties": true,
+          "type": "object"
+        },
+        {
+          "type": "null"
+        }
+      ],
+      "default": null,
+      "title": "Jumpstarter-Controller"
+    },
+    "global": {
+      "anyOf": [
+        {
+          "$ref": "#/$defs/Global"
+        },
+        {
+          "type": "null"
+        }
+      ],
+      "default": null
+    }
+  },
+  "title": "Model",
+  "type": "object"
+}
diff --git a/controller/deploy/helm/jumpstarter/values.yaml b/controller/deploy/helm/jumpstarter/values.yaml
new file mode 100644
index 000000000..577586025
--- /dev/null
+++ b/controller/deploy/helm/jumpstarter/values.yaml
@@ -0,0 +1,115 @@
+## @section Global parameters
+## @descriptionStart This section contains parameters common to all the
+## components in the 
deployment. +## @descriptionEnd +## +## @param global.baseDomain Base domain to construct the FQDN for the service endpoints. +## @param global.namespace Namespace where the components will be deployed. +## @param global.storageClassName Storage class name for the PVCs. +## @param global.storageClassNameRWM Storage class name for multiple reader/writer PVCs. +## @param global.metrics.enabled Enable metrics exporting and service +## @param global.timestamp Timestamp to be used to trigger a new deployment, i.e. if you want pods to be restarted and pickup ":latest" + +global: + baseDomain: jumpstarter.my.domain.com + namespace: jumpstarter-lab + storageClassName: standard + storageClassNameRWM: standard + metrics: + enabled: false + timestamp: "" # can be used to timestamp deployments and make them reload + +## @section Jumpstarter Controller parameters +## @descriptionStart This section contains parameters for the Jumpstarter Controller. +## @descriptionEnd +## +## @param jumpstarter-controller.enabled Enable the Jumpstarter Controller. + +## @param jumpstarter-controller.image Image for the controller. +## @param jumpstarter-controller.tag Tag for the controller image. +## @param jumpstarter-controller.imagePullPolicy Image pull policy for the controller. + +## @param jumpstarter-controller.namespace Namespace where the controller will be deployed, defaults to global.namespace. + +## @param jumpstarter-controller.config.grpc.keepalive.minTime. The minimum amount of time a client should wait before sending a keepalive ping. +## @param jumpstarter-controller.config.grpc.keepalive.permitWithoutStream. Whether to allow keepalive pings even when there are no active streams(RPCs). + +## @param jumpstarter-controller.config.authentication.internal.prefix. Prefix to add to the subject claim of the tokens issued by the builtin authenticator. +## @param jumpstarter-controller.config.authentication.jwt. 
External OIDC authentication, see https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration for documentation + +## @section Ingress And Route parameters +## @descriptionStart This section contains parameters for the Ingress and Route configurations. +## You can enable either the gRPC ingress or the OpenShift route but not both. +## @descriptionEnd +## +## @param jumpstarter-controller.grpc.hostname Hostname for the controller to use for the controller gRPC. +## @param jumpstarter-controller.grpc.routerHostname Hostname for the controller to use for the router gRPC. +## +## @param jumpstarter-controller.grpc.tls.mode Setup the TLS mode for endpoints, either "passthrough" or "reencrypt". +## @param jumpstarter-controller.grpc.tls.port Port to use for the gRPC endpoints ingress or route, this can be useful for ingress routers on non-standard ports. +## @param jumpstarter-controller.grpc.tls.controllerCertSecret Secret containing the TLS certificate/key for the gRPC endpoint. +## @param jumpstarter-controller.grpc.tls.routerCertSecret Secret containing the TLS certificate/key for the gRPC router endpoints. +## +## @param jumpstarter-controller.grpc.endpoint The endpoints are passed down to the services to +## know where to announce the endpoints to the clients. +## +## @param jumpstarter-controller.grpc.routerEndpoint The endpoints are passed down to the services to +## know where to announce the endpoints to the clients. +## +## @param jumpstarter-controller.grpc.ingress.enabled Enable the gRPC ingress configuration. +## +## @param jumpstarter-controller.grpc.mode Mode to use for the gRPC endpoints, either route or ingress. 
+ + + +jumpstarter-controller: + enabled: true + + image: quay.io/jumpstarter-dev/jumpstarter-controller + tag: "" + imagePullPolicy: IfNotPresent + + namespace: "" + + config: + grpc: + keepalive: + # Safety: potentially makes server vulnerable to DDoS + # https://grpc.io/docs/guides/keepalive/#how-configuring-keepalive-affects-a-call + minTime: 3s + permitWithoutStream: true + authentication: + internal: + prefix: "internal:" + # To trust service account tokens, first execute: + # kubectl create clusterrolebinding oidc-reviewer \ + # --clusterrole=system:service-account-issuer-discovery \ + # --group=system:unauthenticated + # Then uncomment: + # + # jwt: + # - issuer: + # url: https://kubernetes.default.svc.cluster.local + # audiences: + # - https://kubernetes.default.svc.cluster.local + # audienceMatchPolicy: MatchAny + # certificateAuthority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # claimMappings: + # username: + # claim: "sub" + # prefix: "kubernetes:" + + grpc: + hostname: "" + routerHostname: "" + + endpoint: "" + routerEndpoint: "" + + tls: + mode: "passthrough" + port: 443 + routerCertSecret: "" + controllerCertSecret: "" + + mode: "route" # route, ingress or external(user created) diff --git a/controller/deploy/operator/Makefile b/controller/deploy/operator/Makefile new file mode 100644 index 000000000..b87499dad --- /dev/null +++ b/controller/deploy/operator/Makefile @@ -0,0 +1,357 @@ +# VERSION defines the project version for the bundle. +# Update this value when you upgrade the version of your project. +# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) +VERSION ?= 0.8.0 + +# CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. 
(E.g CHANNELS = "candidate,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif + +# DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. +# This variable is used to construct full image tags for bundle and catalog images. +# +# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both +# jumpstarter.dev/jumpstarter-operator-bundle:$VERSION and jumpstarter.dev/jumpstarter-operator-catalog:$VERSION. +IMAGE_TAG_BASE ?= quay.io/jumpstarter-dev/jumpstarter-operator + +# BUNDLE_IMG defines the image:tag used for the bundle. +# You can use it as an arg. 
(E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) + +# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command +BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + +# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests +# You can enable this value if you would like to use SHA Based Digests +# To enable set flag to true +USE_IMAGE_DIGESTS ?= false +ifeq ($(USE_IMAGE_DIGESTS), true) + BUNDLE_GEN_FLAGS += --use-image-digests +endif + +# Set the Operator SDK version to use. By default, what is installed on the system is used. +# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. +OPERATOR_SDK_VERSION ?= v1.41.1 +# Image URL to use all building/pushing image targets +IMG ?= quay.io/jumpstarter-dev/jumpstarter-operator:latest + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker which is +# scaffolded by default. However, you might want to replace it to use other +# tools. (i.e. podman) +CONTAINER_TOOL ?= podman + +# Version information +GIT_VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "unknown") +GIT_COMMIT := $(shell git rev-parse HEAD 2>/dev/null || echo "unknown") +BUILD_DATE := $(shell date -u +'%Y-%m-%dT%H:%M:%SZ') + +# LDFLAGS for version information +LDFLAGS := -X main.version=$(GIT_VERSION) \ + -X main.gitCommit=$(GIT_COMMIT) \ + -X main.buildDate=$(BUILD_DATE) + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. 
+SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet setup-envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... 
| grep -v /e2e) -coverprofile cover.out + + +.PHONY: test-e2e +test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. + KIND_CLUSTER=kind-jumpstarter go test ./test/e2e/ -v -ginkgo.v + + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter + $(GOLANGCI_LINT) run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes + $(GOLANGCI_LINT) run --fix + +.PHONY: lint-config +lint-config: golangci-lint ## Verify golangci-lint linter configuration + $(GOLANGCI_LINT) config verify + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -ldflags "$(LDFLAGS)" -o bin/manager cmd/main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd/main.go + +# If you wish to build the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + $(CONTAINER_TOOL) build \ + --build-arg GIT_VERSION=$(GIT_VERSION) \ + --build-arg GIT_COMMIT=$(GIT_COMMIT) \ + --build-arg BUILD_DATE=$(BUILD_DATE) \ + -t ${IMG} ../../ -f ../../Dockerfile.operator + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + $(CONTAINER_TOOL) push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. 
if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' ../../Dockerfile.operator > ../../Dockerfile.operator.cross + - $(CONTAINER_TOOL) buildx create --name jumpstarter-operator-builder + $(CONTAINER_TOOL) buildx use jumpstarter-operator-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) \ + --build-arg GIT_VERSION=$(GIT_VERSION) \ + --build-arg GIT_COMMIT=$(GIT_COMMIT) \ + --build-arg BUILD_DATE=$(BUILD_DATE) \ + --tag ${IMG} -f ../../Dockerfile.operator.cross ../../ + - $(CONTAINER_TOOL) buildx rm jumpstarter-operator-builder + rm ../../Dockerfile.operator.cross + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default > dist/install.yaml + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 
+ $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUBECTL ?= kubectl +KIND ?= kind +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.6.0 +CONTROLLER_TOOLS_VERSION ?= v0.18.0 +#ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. release-0.20) +ENVTEST_VERSION ?= $(shell go list -m -f "{{ .Version }}" sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $$2, $$3}') +#ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31) +ENVTEST_K8S_VERSION ?= $(shell go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $$3}') +GOLANGCI_LINT_VERSION ?= v2.5.0 + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. 
+$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: setup-envtest +setup-envtest: envtest ## Download the binaries required for ENVTEST in the local bin directory. + @echo "Setting up envtest binaries for Kubernetes version $(ENVTEST_K8S_VERSION)..." + @$(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path || { \ + echo "Error: Failed to set up envtest binaries for version $(ENVTEST_K8S_VERSION)."; \ + exit 1; \ + } + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f "$(1)-$(3)" ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +rm -f $(1) || true ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv $(1) $(1)-$(3) ;\ +} ;\ +ln -sf $(1)-$(3) $(1) +endef + +.PHONY: operator-sdk +OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk +operator-sdk: ## Download operator-sdk locally if necessary. 
+ifeq (,$(wildcard $(OPERATOR_SDK))) +ifeq (, $(shell which operator-sdk 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPERATOR_SDK)) ;\ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$${OS}_$${ARCH} ;\ + chmod +x $(OPERATOR_SDK) ;\ + } +else +OPERATOR_SDK = $(shell which operator-sdk) +endif +endif + +.PHONY: bundle +bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. + $(OPERATOR_SDK) generate kustomize manifests -q + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) + $(OPERATOR_SDK) bundle validate ./bundle + +.PHONY: bundle-build +bundle-build: ## Build the bundle image. + $(CONTAINER_TOOL) build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + +.PHONY: bundle-push +bundle-push: ## Push the bundle image. + $(MAKE) docker-push IMG=$(BUNDLE_IMG) + +.PHONY: opm +OPM = $(LOCALBIN)/opm +opm: ## Download opm locally if necessary. +ifeq (,$(wildcard $(OPM))) +ifeq (,$(shell which opm 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPM)) ;\ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.55.0/$${OS}-$${ARCH}-opm ;\ + chmod +x $(OPM) ;\ + } +else +OPM = $(shell which opm) +endif +endif + +# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). +# These images MUST exist in a registry and be pull-able. +BUNDLE_IMGS ?= $(BUNDLE_IMG) + +# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). 
+CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) + +# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. +ifneq ($(origin CATALOG_BASE_IMG), undefined) +FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) +endif + +# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. +# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: +# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator +.PHONY: catalog-build +catalog-build: opm ## Build a catalog image. + $(OPM) index add --container-tool $(CONTAINER_TOOL) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + +# Push the catalog image. +.PHONY: catalog-push +catalog-push: ## Push a catalog image. + $(MAKE) docker-push IMG=$(CATALOG_IMG) diff --git a/controller/deploy/operator/PROJECT b/controller/deploy/operator/PROJECT new file mode 100644 index 000000000..603af80bf --- /dev/null +++ b/controller/deploy/operator/PROJECT @@ -0,0 +1,23 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html +domain: jumpstarter.dev +layout: +- go.kubebuilder.io/v4 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: jumpstarter-operator +repo: github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: jumpstarter.dev + group: operator + kind: Jumpstarter + path: github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1 + version: v1alpha1 +version: "3" diff --git a/controller/deploy/operator/README.md b/controller/deploy/operator/README.md new file mode 100644 index 000000000..5e9d8d49c --- /dev/null +++ b/controller/deploy/operator/README.md @@ -0,0 +1,135 @@ +# jumpstarter-operator +// TODO(user): Add simple overview of use/purpose + +## Description +// TODO(user): An in-depth paragraph about your project and overview of use + +## Getting Started + +### Prerequisites +- go version v1.24.0+ +- docker version 17.03+. +- kubectl version v1.11.3+. +- Access to a Kubernetes v1.11.3+ cluster. + +### To Deploy on the cluster +**Build and push your image to the location specified by `IMG`:** + +```sh +make docker-build docker-push IMG=/jumpstarter-operator:tag +``` + +**NOTE:** This image ought to be published in the personal registry you specified. +And it is required to have access to pull the image from the working environment. +Make sure you have the proper permission to the registry if the above commands don’t work. + +**Install the CRDs into the cluster:** + +```sh +make install +``` + +**Deploy the Manager to the cluster with the image specified by `IMG`:** + +```sh +make deploy IMG=/jumpstarter-operator:tag +``` + +> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin +privileges or be logged in as admin. 
+
+**Create instances of your solution**
+You can apply the samples (examples) from the config/sample:
+
+```sh
+kubectl apply -k config/samples/
+```
+
+>**NOTE**: Ensure that the samples have default values to test it out.
+
+### To Uninstall
+**Delete the instances (CRs) from the cluster:**
+
+```sh
+kubectl delete -k config/samples/
+```
+
+**Delete the APIs (CRDs) from the cluster:**
+
+```sh
+make uninstall
+```
+
+**UnDeploy the controller from the cluster:**
+
+```sh
+make undeploy
+```
+
+## Project Distribution
+
+Following are the options to release and provide this solution to the users.
+
+### By providing a bundle with all YAML files
+
+1. Build the installer for the image built and published in the registry:
+
+```sh
+make build-installer IMG=<some-registry>/jumpstarter-operator:tag
+```
+
+**NOTE:** The makefile target mentioned above generates an 'install.yaml'
+file in the dist directory. This file contains all the resources built
+with Kustomize, which are necessary to install this project without its
+dependencies.
+
+2. Using the installer
+
+Users can just run 'kubectl apply -f <URL for YAML BUNDLE>' to install
+the project, i.e.:
+
+```sh
+kubectl apply -f https://raw.githubusercontent.com/<org>/jumpstarter-operator/<tag or branch>/dist/install.yaml
+```
+
+### By providing a Helm Chart
+
+1. Build the chart using the optional helm plugin
+
+```sh
+operator-sdk edit --plugins=helm/v1-alpha
+```
+
+2. See that a chart was generated under 'dist/chart', and users
+can obtain this solution from there.
+
+**NOTE:** If you change the project, you need to update the Helm Chart
+using the same command above to sync the latest changes. Furthermore,
+if you create webhooks, you need to use the above command with
+the '--force' flag and manually ensure that any custom configuration
+previously added to 'dist/chart/values.yaml' or 'dist/chart/manager/manager.yaml'
+is manually re-applied afterwards.
+ +## Contributing +// TODO(user): Add detailed information on how you would like others to contribute to this project + +**NOTE:** Run `make help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +## License + +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/controller/deploy/operator/api/v1alpha1/groupversion_info.go b/controller/deploy/operator/api/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..c46b1b427 --- /dev/null +++ b/controller/deploy/operator/api/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the operator v1alpha1 API group. 
+// +kubebuilder:object:generate=true +// +groupName=operator.jumpstarter.dev +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "operator.jumpstarter.dev", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/controller/deploy/operator/api/v1alpha1/jumpstarter_types.go b/controller/deploy/operator/api/v1alpha1/jumpstarter_types.go new file mode 100644 index 000000000..be4635795 --- /dev/null +++ b/controller/deploy/operator/api/v1alpha1/jumpstarter_types.go @@ -0,0 +1,548 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" +) + +// yaml mockup of the JumpstarterSpec +// spec: +// baseDomain: example.com +// useCertManager: true +// controller: +// image: quay.io/jumpstarter/jumpstarter:0.7.2 +// imagePullPolicy: IfNotPresent +// resources: +// requests: +// cpu: 100m +// memory: 100Mi +// replicas: 2 +// exporterOptions: +// offlineTimeout: 180s +// restApi: +// tls: +// certSecret: jumpstarter-rest-api-tls +// endpoints: +// - hostname: rest-api.example.com +// route: +// class: default +// grpc: +// tls: +// certSecret: jumpstarter-tls +// endpoints: +// - hostname: grpc.example.com +// route: +// enabled: true +// - hostname: grpc2.example.com +// ingress: +// enabled: true +// annotations: +// labels: +// - hostname: this.one.is.optional.com +// nodeport: +// enabled: true +// port: 9090 +// annotations: +// labels: +// - hostname: this.one.is.optional.too.com +// loadBalancer: +// enabled: true +// port: 9090 +// annotations: +// labels: +// keepalive: +// minTime: 1s +// permitWithoutStream: true +// timeout: 180s +// intervalTime: 10s +// routers: +// image: quay.io/jumpstarter/jumpstarter:0.7.2 +// imagePullPolicy: IfNotPresent +// resources: +// requests: +// cpu: 100m +// memory: 100Mi +// replicas: 3 +// topologySpreadConstraints: +// - topologyKey: "kubernetes.io/hostname" +// whenUnsatisfiable: ScheduleAnyway +// - topologyKey: "kubernetes.io/zone" +// whenUnsatisfiable: ScheduleAnyway +// grpc: +// tls: +// certSecret: jumpstarter-router-tls +// endpoints: +// - hostname: router-$(replica).router.example.com +// route: +// enabled: true +// ingress: +// enabled: true +// class: default +// nodeport: +// enabled: true +// port: 9090 +// loadBalancer: +// annotations: +// labels: +// enabled: true +// keepalive: +// minTime: 1s +// permitWithoutStream: true +// timeout: 180s +// intervalTime: 10s 
+// authentication: +// internal: +// prefix: "internal:" +// enabled: true +// k8s: +// enabled: true +// jwt: +// - issuer: +// url: https://auth.example.com/auth/realms/EmployeeIDP +// audiences: +// - account +// claimMappings: +// username: +// claim: "preferred_username" +// prefix: "corp:" +// + +// JumpstarterSpec defines the desired state of a Jumpstarter deployment. A deployment +// can be created in a namespace of the cluster, and that's where all the Jumpstarter +// resources and services will reside. +type JumpstarterSpec struct { + // Base domain used to construct FQDNs for all service endpoints. + // This domain will be used to generate the default hostnames for Routes, Ingresses, and certificates. + // Example: "example.com" will generate endpoints like "grpc.example.com", "router.example.com" + // +kubebuilder:validation:Pattern=^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + BaseDomain string `json:"baseDomain,omitempty"` + + // Enable automatic TLS certificate management using cert-manager. + // When enabled, jumpstarter will interact with cert-manager to automatically provision + // and renew TLS certificates for all endpoints. Requires cert-manager to be installed in the cluster. + // +kubebuilder:default=true + UseCertManager bool `json:"useCertManager,omitempty"` + + // Controller configuration for the main Jumpstarter API and gRPC services. + // The controller handles gRPC and REST API requests from clients and exporters. + // +kubebuilder:default={} + Controller ControllerConfig `json:"controller,omitempty"` + + // Router configuration for the Jumpstarter router service. + // Routers handle gRPC traffic routing and load balancing. + // +kubebuilder:default={} + Routers RoutersConfig `json:"routers,omitempty"` + + // Authentication configuration for client and exporter authentication. + // Supports multiple authentication methods including internal tokens, Kubernetes tokens, and JWT. 
+ Authentication AuthenticationConfig `json:"authentication,omitempty"` +} + +// RoutersConfig defines the configuration for Jumpstarter router pods. +// Routers handle gRPC traffic routing and load balancing between clients and exporters. +type RoutersConfig struct { + // Container image for the router pods in 'registry/repository/image:tag' format. + // If not specified, defaults to the latest stable version of the Jumpstarter router. + // +kubebuilder:default="quay.io/jumpstarter-dev/jumpstarter-controller:latest" + Image string `json:"image,omitempty"` + + // Image pull policy for the router container. + // Controls when the container image should be pulled from the registry. + // +kubebuilder:default="IfNotPresent" + // +kubebuilder:validation:Enum=Always;IfNotPresent;Never + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // Resource requirements for router pods. + // Defines CPU and memory requests and limits for each router pod. + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // Number of router replicas to run. + // Must be a positive integer. Minimum recommended value is 3 for high availability. + // +kubebuilder:default=3 + // +kubebuilder:validation:Minimum=1 + Replicas int32 `json:"replicas,omitempty"` + + // Topology spread constraints for router pod distribution. + // Ensures router pods are distributed evenly across nodes and zones. + // Useful for high availability and fault tolerance. + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + + // gRPC configuration for router endpoints. + // Defines how router gRPC services are exposed and configured. + GRPC GRPCConfig `json:"grpc,omitempty"` +} + +// ControllerConfig defines the configuration for Jumpstarter controller pods. +// The controller is responsible for the gRPC and REST API services used by clients +// and exporters to interact with Jumpstarter. 
+type ControllerConfig struct { + // Container image for the controller pods in 'registry/repository/image:tag' format. + // If not specified, defaults to the latest stable version of the Jumpstarter controller. + // +kubebuilder:default="quay.io/jumpstarter-dev/jumpstarter-controller:latest" + Image string `json:"image,omitempty"` + + // Image pull policy for the controller container. + // Controls when the container image should be pulled from the registry. + // +kubebuilder:default="IfNotPresent" + // +kubebuilder:validation:Enum=Always;IfNotPresent;Never + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // Resource requirements for controller pods. + // Defines CPU and memory requests and limits for each controller pod. + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // Number of controller replicas to run. + // Must be a positive integer. Minimum recommended value is 2 for high availability. + // +kubebuilder:default=2 + // +kubebuilder:validation:Minimum=1 + Replicas int32 `json:"replicas,omitempty"` + + // Exporter options configuration. + // Controls how exporters connect and behave when communicating with the controller. + ExporterOptions ExporterOptions `json:"exporterOptions,omitempty"` + + // REST API configuration for HTTP-based clients. + // Enables non-gRPC clients to interact with Jumpstarter for listing leases, + // managing exporters, and creating new leases. Use this when you need HTTP/JSON access. + RestAPI RestAPIConfig `json:"restApi,omitempty"` + + // gRPC configuration for controller endpoints. + // Defines how controller gRPC services are exposed and configured. + GRPC GRPCConfig `json:"grpc,omitempty"` + + // Authentication configuration for client and exporter authentication. + // Configures how clients and exporters can authenticate with Jumpstarter. + // Supports multiple authentication methods including internal tokens, Kubernetes tokens, and JWT. 
+ Authentication AuthenticationConfig `json:"authentication,omitempty"` +} + +// ExporterOptions defines configuration options for exporter behavior. +type ExporterOptions struct { + // Offline timeout duration for exporters. + // After this duration without communication, an exporter is considered offline. + // This drives the online/offline status field of exporters, and offline exporters + // won't be considered for leases. + // +kubebuilder:default="180s" + OfflineTimeout *metav1.Duration `json:"offlineTimeout,omitempty"` +} + +// GRPCConfig defines gRPC service configuration. +// Configures how gRPC services are exposed and their connection behavior. +type GRPCConfig struct { + // TLS configuration for secure gRPC communication. + // Requires a Kubernetes secret containing the TLS certificate and private key. + // If useCertManager is enabled, this secret will be automatically created. + // See also: spec.useCertManager for automatic certificate management. + TLS TLSConfig `json:"tls,omitempty"` + + // List of gRPC endpoints to expose. + // Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + // based on your cluster setup. Example: Use Route for OpenShift, Ingress for standard Kubernetes. + Endpoints []Endpoint `json:"endpoints,omitempty"` + + // Keepalive configuration for gRPC connections. + // Controls connection health checks and idle connection management. + // Helps maintain stable connections in load-balanced environments. + Keepalive *GRPCKeepaliveConfig `json:"keepalive,omitempty"` +} + +// GRPCKeepaliveConfig defines keepalive settings for gRPC connections. +// These settings help maintain stable connections in load-balanced environments +// and detect connection issues early. +type GRPCKeepaliveConfig struct { + // Minimum time between keepalives that the connection will accept, under this threshold + // the other side will get a GOAWAY signal. + // Prevents excessive keepalive traffic on the network. 
+	// +kubebuilder:default="1s"
+	MinTime *metav1.Duration `json:"minTime,omitempty"`
+
+	// Allow keepalive pings even when there are no active RPC streams.
+	// Useful for detecting connection issues in idle connections.
+	// This is important to keep TCP gRPC connections alive when traversing
+	// load balancers and proxies.
+	// +kubebuilder:default=true
+	PermitWithoutStream bool `json:"permitWithoutStream,omitempty"`
+
+	// Timeout for keepalive ping acknowledgment.
+	// If a ping is not acknowledged within this time, the connection is considered broken.
+	// The default is high to avoid issues when the network on an exporter is overloaded, i.e.
+	// during flashing.
+	// +kubebuilder:default="180s"
+	Timeout *metav1.Duration `json:"timeout,omitempty"`
+
+	// Maximum time a connection can remain idle before being closed.
+	// It defaults to infinity.
+	MaxConnectionIdle *metav1.Duration `json:"maxConnectionIdle,omitempty"`
+
+	// Maximum age of a connection before it is closed and recreated.
+	// Helps prevent issues with long-lived connections. It defaults to infinity.
+	MaxConnectionAge *metav1.Duration `json:"maxConnectionAge,omitempty"`
+
+	// Grace period for closing connections that exceed MaxConnectionAge.
+	// Allows ongoing RPCs to complete before closing the connection.
+	MaxConnectionAgeGrace *metav1.Duration `json:"maxConnectionAgeGrace,omitempty"`
+
+	// Interval between keepalive pings.
+	// How often to send keepalive pings to check connection health. This is important
+	// to keep TCP gRPC connections alive when traversing load balancers and proxies.
+	// +kubebuilder:default="10s"
+	IntervalTime *metav1.Duration `json:"intervalTime,omitempty"`
+}
+
+// AuthenticationConfig defines authentication methods for Jumpstarter.
+// Supports multiple authentication methods that can be enabled simultaneously.
+type AuthenticationConfig struct {
+	// Internal authentication configuration.
+ // Built-in authenticator that issues tokens for clients and exporters. + // This is the simplest authentication method and is enabled by default. + Internal InternalAuthConfig `json:"internal,omitempty"` + + // Kubernetes authentication configuration. + // Enables authentication using Kubernetes service account tokens. + // Useful for integrating with existing Kubernetes RBAC policies. + K8s K8sAuthConfig `json:"k8s,omitempty"` + + // JWT authentication configuration. + // Enables authentication using external JWT tokens from OIDC providers. + // Supports multiple JWT authenticators for different identity providers. + JWT []apiserverv1beta1.JWTAuthenticator `json:"jwt,omitempty"` +} + +// InternalAuthConfig defines the built-in authentication configuration. +// The internal authenticator issues tokens for clients and exporters to authenticate +// with Jumpstarter. This is the simplest authentication method. +type InternalAuthConfig struct { + // Prefix to add to the subject claim of issued tokens. + // Helps distinguish internal tokens from other authentication methods. + // Example: "internal:" will result in subjects like "internal:user123" + // +kubebuilder:default="internal:" + // +kubebuilder:validation:MaxLength=50 + Prefix string `json:"prefix,omitempty"` + + // Enable the internal authentication method. + // When disabled, clients cannot use internal tokens for authentication. + // +kubebuilder:default=true + Enabled bool `json:"enabled,omitempty"` + + // Token validity duration for issued tokens. + // After this duration, tokens expire and must be renewed. + // +kubebuilder:default="43800h" + TokenLifetime *metav1.Duration `json:"tokenLifetime,omitempty"` +} + +// K8sAuthConfig defines Kubernetes service account authentication. +// Enables authentication using Kubernetes service account tokens. +type K8sAuthConfig struct { + // Enable Kubernetes authentication. + // When enabled, clients can authenticate using Kubernetes service account tokens. 
+ // +kubebuilder:default=false + Enabled bool `json:"enabled,omitempty"` +} + +// TLSConfig defines TLS configuration for secure communication. +type TLSConfig struct { + // Name of the Kubernetes secret containing the TLS certificate and private key. + // The secret must contain 'tls.crt' and 'tls.key' keys. + // If useCertManager is enabled, this secret will be automatically created. + // +kubebuilder:validation:Pattern=^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + CertSecret string `json:"certSecret,omitempty"` +} + +// RestAPIConfig defines REST API configuration for HTTP-based clients. +// Provides HTTP/JSON access to Jumpstarter functionality. +type RestAPIConfig struct { + // TLS configuration for secure HTTP communication. + // Requires a Kubernetes secret containing the TLS certificate and private key. + TLS TLSConfig `json:"tls,omitempty"` + + // List of REST API endpoints to expose. + // Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + // based on your cluster setup. + Endpoints []Endpoint `json:"endpoints,omitempty"` +} + +// Endpoint defines a single endpoint configuration. +// An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. +// Multiple methods can be configured simultaneously for the same address. +type Endpoint struct { + // Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + // Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. + // When optional, the address is used for certificate generation and DNS resolution. + // Supports templating with $(replica) for replica-specific addresses. 
+ // Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + // +kubebuilder:validation:Pattern=`^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$` + Address string `json:"address,omitempty"` + + // Route configuration for OpenShift clusters. + // Creates an OpenShift Route resource for this endpoint. + // Only applicable in OpenShift environments. + Route *RouteConfig `json:"route,omitempty"` + + // Ingress configuration for standard Kubernetes clusters. + // Creates an Ingress resource for this endpoint. + // Requires an ingress controller to be installed. + Ingress *IngressConfig `json:"ingress,omitempty"` + + // NodePort configuration for direct node access. + // Exposes the service on a specific port on each node. + // Useful for bare-metal or simple cluster setups. + NodePort *NodePortConfig `json:"nodeport,omitempty"` + + // LoadBalancer configuration for cloud environments. + // Creates a LoadBalancer service for this endpoint. + // Requires cloud provider support for LoadBalancer services. + LoadBalancer *LoadBalancerConfig `json:"loadBalancer,omitempty"` + + // ClusterIP configuration for internal service access. + // Creates a ClusterIP service for this endpoint. + // Useful for internal service-to-service communication or when + // using a different method to expose the service externally. + ClusterIP *ClusterIPConfig `json:"clusterIP,omitempty"` +} + +// RouteConfig defines OpenShift Route configuration. +type RouteConfig struct { + // Enable the OpenShift Route for this endpoint. + // When disabled, no Route resource will be created for this endpoint. + // When not specified, the operator will determine the best networking option for your cluster. + Enabled bool `json:"enabled,omitempty"` + + // Annotations to add to the OpenShift Route resource. + // Useful for configuring route-specific behavior and TLS settings. 
+ Annotations map[string]string `json:"annotations,omitempty"` + + // Labels to add to the OpenShift Route resource. + // Useful for monitoring, cost allocation, and resource organization. + Labels map[string]string `json:"labels,omitempty"` +} + +// IngressConfig defines Kubernetes Ingress configuration. +type IngressConfig struct { + // Enable the Kubernetes Ingress for this endpoint. + // When disabled, no Ingress resource will be created for this endpoint. + // When not specified, the operator will determine the best networking option for your cluster. + Enabled bool `json:"enabled,omitempty"` + + // Ingress class name for the Kubernetes Ingress. + // Specifies which ingress controller should handle this ingress. + // +kubebuilder:default="default" + Class string `json:"class,omitempty"` + + // Annotations to add to the Kubernetes Ingress resource. + // Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. + Annotations map[string]string `json:"annotations,omitempty"` + + // Labels to add to the Kubernetes Ingress resource. + // Useful for monitoring, cost allocation, and resource organization. + Labels map[string]string `json:"labels,omitempty"` +} + +// NodePortConfig defines Kubernetes NodePort service configuration. +type NodePortConfig struct { + // Enable the NodePort service for this endpoint. + // When disabled, no NodePort service will be created for this endpoint. + // When not specified, the operator will determine the best networking option for your cluster. + Enabled bool `json:"enabled,omitempty"` + + // NodePort port number to expose on each node. + // Must be in the range 30000-32767 for most Kubernetes clusters. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port int32 `json:"port,omitempty"` + + // Annotations to add to the NodePort service. + // Useful for configuring service-specific behavior and load balancer options. 
+ Annotations map[string]string `json:"annotations,omitempty"` + + // Labels to add to the NodePort service. + // Useful for monitoring, cost allocation, and resource organization. + Labels map[string]string `json:"labels,omitempty"` +} + +// LoadBalancerConfig defines Kubernetes LoadBalancer service configuration. +type LoadBalancerConfig struct { + // Enable the LoadBalancer service for this endpoint. + // When disabled, no LoadBalancer service will be created for this endpoint. + // When not specified, the operator will determine the best networking option for your cluster. + Enabled bool `json:"enabled,omitempty"` + + // Port number for the LoadBalancer service. + // Must be a valid port number (1-65535). + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port int32 `json:"port,omitempty"` + + // Annotations to add to the LoadBalancer service. + // Useful for configuring cloud provider-specific load balancer options. + // Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + Annotations map[string]string `json:"annotations,omitempty"` + + // Labels to add to the LoadBalancer service. + // Useful for monitoring, cost allocation, and resource organization. + Labels map[string]string `json:"labels,omitempty"` +} + +// ClusterIPConfig defines Kubernetes ClusterIP service configuration. +type ClusterIPConfig struct { + // Enable the ClusterIP service for this endpoint. + // When disabled, no ClusterIP service will be created for this endpoint. + Enabled bool `json:"enabled,omitempty"` + + // Annotations to add to the ClusterIP service. + // Useful for configuring service-specific behavior and load balancer options. + Annotations map[string]string `json:"annotations,omitempty"` + + // Labels to add to the ClusterIP service. + // Useful for monitoring, cost allocation, and resource organization. + Labels map[string]string `json:"labels,omitempty"` +} + +// JumpstarterStatus defines the observed state of Jumpstarter. 
+// This field is currently empty but can be extended to include status information +// such as deployment status, endpoint URLs, and health information. +type JumpstarterStatus struct { + // Jumpstarter deployment status +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Jumpstarter is the Schema for the jumpstarters API. +type Jumpstarter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec JumpstarterSpec `json:"spec,omitempty"` + Status JumpstarterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// JumpstarterList contains a list of Jumpstarter deployments. +// This is used by kubectl to list multiple Jumpstarter resources. +type JumpstarterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Jumpstarter `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Jumpstarter{}, &JumpstarterList{}) +} diff --git a/controller/deploy/operator/api/v1alpha1/zz_generated.deepcopy.go b/controller/deploy/operator/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a6764a386 --- /dev/null +++ b/controller/deploy/operator/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,539 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfig) DeepCopyInto(out *AuthenticationConfig) { + *out = *in + in.Internal.DeepCopyInto(&out.Internal) + out.K8s = in.K8s + if in.JWT != nil { + in, out := &in.JWT, &out.JWT + *out = make([]v1beta1.JWTAuthenticator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfig. +func (in *AuthenticationConfig) DeepCopy() *AuthenticationConfig { + if in == nil { + return nil + } + out := new(AuthenticationConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterIPConfig) DeepCopyInto(out *ClusterIPConfig) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterIPConfig. +func (in *ClusterIPConfig) DeepCopy() *ClusterIPConfig { + if in == nil { + return nil + } + out := new(ClusterIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControllerConfig) DeepCopyInto(out *ControllerConfig) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + in.ExporterOptions.DeepCopyInto(&out.ExporterOptions) + in.RestAPI.DeepCopyInto(&out.RestAPI) + in.GRPC.DeepCopyInto(&out.GRPC) + in.Authentication.DeepCopyInto(&out.Authentication) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfig. +func (in *ControllerConfig) DeepCopy() *ControllerConfig { + if in == nil { + return nil + } + out := new(ControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = new(RouteConfig) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(IngressConfig) + (*in).DeepCopyInto(*out) + } + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(NodePortConfig) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerConfig) + (*in).DeepCopyInto(*out) + } + if in.ClusterIP != nil { + in, out := &in.ClusterIP, &out.ClusterIP + *out = new(ClusterIPConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExporterOptions) DeepCopyInto(out *ExporterOptions) { + *out = *in + if in.OfflineTimeout != nil { + in, out := &in.OfflineTimeout, &out.OfflineTimeout + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExporterOptions. +func (in *ExporterOptions) DeepCopy() *ExporterOptions { + if in == nil { + return nil + } + out := new(ExporterOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCConfig) DeepCopyInto(out *GRPCConfig) { + *out = *in + out.TLS = in.TLS + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Keepalive != nil { + in, out := &in.Keepalive, &out.Keepalive + *out = new(GRPCKeepaliveConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCConfig. +func (in *GRPCConfig) DeepCopy() *GRPCConfig { + if in == nil { + return nil + } + out := new(GRPCConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCKeepaliveConfig) DeepCopyInto(out *GRPCKeepaliveConfig) { + *out = *in + if in.MinTime != nil { + in, out := &in.MinTime, &out.MinTime + *out = new(metav1.Duration) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.MaxConnectionIdle != nil { + in, out := &in.MaxConnectionIdle, &out.MaxConnectionIdle + *out = new(metav1.Duration) + **out = **in + } + if in.MaxConnectionAge != nil { + in, out := &in.MaxConnectionAge, &out.MaxConnectionAge + *out = new(metav1.Duration) + **out = **in + } + if in.MaxConnectionAgeGrace != nil { + in, out := &in.MaxConnectionAgeGrace, &out.MaxConnectionAgeGrace + *out = new(metav1.Duration) + **out = **in + } + if in.IntervalTime != nil { + in, out := &in.IntervalTime, &out.IntervalTime + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCKeepaliveConfig. +func (in *GRPCKeepaliveConfig) DeepCopy() *GRPCKeepaliveConfig { + if in == nil { + return nil + } + out := new(GRPCKeepaliveConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressConfig) DeepCopyInto(out *IngressConfig) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressConfig. 
+func (in *IngressConfig) DeepCopy() *IngressConfig { + if in == nil { + return nil + } + out := new(IngressConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalAuthConfig) DeepCopyInto(out *InternalAuthConfig) { + *out = *in + if in.TokenLifetime != nil { + in, out := &in.TokenLifetime, &out.TokenLifetime + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalAuthConfig. +func (in *InternalAuthConfig) DeepCopy() *InternalAuthConfig { + if in == nil { + return nil + } + out := new(InternalAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Jumpstarter) DeepCopyInto(out *Jumpstarter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Jumpstarter. +func (in *Jumpstarter) DeepCopy() *Jumpstarter { + if in == nil { + return nil + } + out := new(Jumpstarter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Jumpstarter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JumpstarterList) DeepCopyInto(out *JumpstarterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Jumpstarter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JumpstarterList. +func (in *JumpstarterList) DeepCopy() *JumpstarterList { + if in == nil { + return nil + } + out := new(JumpstarterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JumpstarterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JumpstarterSpec) DeepCopyInto(out *JumpstarterSpec) { + *out = *in + in.Controller.DeepCopyInto(&out.Controller) + in.Routers.DeepCopyInto(&out.Routers) + in.Authentication.DeepCopyInto(&out.Authentication) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JumpstarterSpec. +func (in *JumpstarterSpec) DeepCopy() *JumpstarterSpec { + if in == nil { + return nil + } + out := new(JumpstarterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JumpstarterStatus) DeepCopyInto(out *JumpstarterStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JumpstarterStatus. 
+func (in *JumpstarterStatus) DeepCopy() *JumpstarterStatus { + if in == nil { + return nil + } + out := new(JumpstarterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sAuthConfig) DeepCopyInto(out *K8sAuthConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAuthConfig. +func (in *K8sAuthConfig) DeepCopy() *K8sAuthConfig { + if in == nil { + return nil + } + out := new(K8sAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerConfig) DeepCopyInto(out *LoadBalancerConfig) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerConfig. +func (in *LoadBalancerConfig) DeepCopy() *LoadBalancerConfig { + if in == nil { + return nil + } + out := new(LoadBalancerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortConfig. +func (in *NodePortConfig) DeepCopy() *NodePortConfig { + if in == nil { + return nil + } + out := new(NodePortConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestAPIConfig) DeepCopyInto(out *RestAPIConfig) { + *out = *in + out.TLS = in.TLS + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPIConfig. +func (in *RestAPIConfig) DeepCopy() *RestAPIConfig { + if in == nil { + return nil + } + out := new(RestAPIConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteConfig) DeepCopyInto(out *RouteConfig) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteConfig. 
+func (in *RouteConfig) DeepCopy() *RouteConfig { + if in == nil { + return nil + } + out := new(RouteConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutersConfig) DeepCopyInto(out *RoutersConfig) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.GRPC.DeepCopyInto(&out.GRPC) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutersConfig. +func (in *RoutersConfig) DeepCopy() *RoutersConfig { + if in == nil { + return nil + } + out := new(RoutersConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. +func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} diff --git a/controller/deploy/operator/bundle.Dockerfile b/controller/deploy/operator/bundle.Dockerfile new file mode 100644 index 000000000..5d5e689a8 --- /dev/null +++ b/controller/deploy/operator/bundle.Dockerfile @@ -0,0 +1,20 @@ +FROM scratch + +# Core bundle labels. 
+LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1=jumpstarter-operator +LABEL operators.operatorframework.io.bundle.channels.v1=alpha +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.41.1 +LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 +LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4 + +# Labels for testing. +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ + +# Copy files to locations specified by labels. +COPY bundle/manifests /manifests/ +COPY bundle/metadata /metadata/ +COPY bundle/tests/scorecard /tests/scorecard/ diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter-operator-controller-manager-metrics-service_v1_service.yaml b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-controller-manager-metrics-service_v1_service.yaml new file mode 100644 index 000000000..a6a7e3b84 --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-controller-manager-metrics-service_v1_service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager + name: jumpstarter-operator-controller-manager-metrics-service +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager +status: + loadBalancer: {} diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter-operator-jumpstarter-admin-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 
b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-jumpstarter-admin-role_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 000000000..c8e11263d --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-jumpstarter-admin-role_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-jumpstarter-admin-role +rules: +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - '*' +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter-operator-jumpstarter-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-jumpstarter-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 000000000..5bb9255f4 --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-jumpstarter-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,27 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-jumpstarter-editor-role +rules: +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter-operator-jumpstarter-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 
b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-jumpstarter-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 000000000..df3c96e65 --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-jumpstarter-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,23 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-jumpstarter-viewer-role +rules: +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - get + - list + - watch +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 000000000..262b65097 --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter-operator.clusterserviceversion.yaml b/controller/deploy/operator/bundle/manifests/jumpstarter-operator.clusterserviceversion.yaml new file mode 100644 index 000000000..0a29854a0 --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter-operator.clusterserviceversion.yaml @@ -0,0 +1,402 @@ +apiVersion: 
operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "operator.jumpstarter.dev/v1alpha1", + "kind": "Jumpstarter", + "metadata": { + "labels": { + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "jumpstarter-operator" + }, + "name": "jumpstarter-sample" + }, + "spec": null + } + ] + capabilities: Basic Install + createdAt: "2025-12-22T16:34:29Z" + operators.operatorframework.io/builder: operator-sdk-v1.41.1 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 + name: jumpstarter-operator.v0.8.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - kind: Client + name: clients.jumpstarter.dev + version: v1alpha1 + - kind: ExporterAccessPolicy + name: exporteraccesspolicies.jumpstarter.dev + version: v1alpha1 + - kind: Exporter + name: exporters.jumpstarter.dev + version: v1alpha1 + - description: Jumpstarter is the Schema for the jumpstarters API. + displayName: Jumpstarter + kind: Jumpstarter + name: jumpstarters.operator.jumpstarter.dev + version: v1alpha1 + - kind: Lease + name: leases.jumpstarter.dev + version: v1alpha1 + description: Jumpstarter is a cloud-native framework for Hardware-in-the-Loop (HIL) + automation that bridges the gap between embedded development workflows and real-world + deployment environments. This operator installs and manages the Jumpstarter Controller, + which acts as the central brain for your testing infrastructure. It orchestrates + secure, shared access to physical hardware and virtual devices (represented as + "exporters") directly from your Kubernetes or OpenShift cluster. 
+ displayName: Jumpstarter Operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + clusterPermissions: + - rules: + - apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - services/status + verbs: + - get + - patch + - update + - apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments/status + verbs: + - get + - patch + - update + - apiGroups: + - config.openshift.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - jumpstarter.dev + resources: + - clients + - exporteraccesspolicies + - exporters + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - jumpstarter.dev + resources: + - clients/finalizers + - exporteraccesspolicies/finalizers + - exporters/finalizers + - leases/finalizers + verbs: + - update + - apiGroups: + - jumpstarter.dev + resources: + - clients/status + - exporteraccesspolicies/status + - exporters/status + - leases/status + verbs: + - get + - patch + - update + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - get + - patch + - update + - apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - create + - 
delete + - get + - list + - patch + - update + - watch + - apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/finalizers + verbs: + - update + - apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get + - patch + - update + - apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - route.openshift.io + resources: + - routes/custom-host + verbs: + - create + - get + - patch + - update + - apiGroups: + - route.openshift.io + resources: + - routes/status + verbs: + - get + - patch + - update + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + serviceAccountName: jumpstarter-operator-controller-manager + deployments: + - label: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager + name: jumpstarter-operator-controller-manager + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager + strategy: {} + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager + spec: + containers: + - args: + - --metrics-bind-address=:8443 + - --leader-elect + - --health-probe-bind-address=:8081 + command: + - /manager + image: quay.io/jumpstarter-dev/jumpstarter-operator:latest + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + 
port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 10m + memory: 256Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: jumpstarter-operator-controller-manager + terminationGracePeriodSeconds: 10 + permissions: + - rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: jumpstarter-operator-controller-manager + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - hil + - hardware-in-the-loop + - hardware + - device + - embedded + - testing + - framework + links: + - name: Jumpstarter Operator + url: https://jumpstarter.dev/main/getting-started/installation/service/index.html + maintainers: + - email: majopela@redhat.com + name: Miguel Angel Ajo + maturity: alpha + provider: + name: The Jumpstarter Community + url: https://jumpstarter.dev + version: 0.8.0 diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter.dev_clients.yaml b/controller/deploy/operator/bundle/manifests/jumpstarter.dev_clients.yaml new file mode 100644 index 000000000..399fc3272 --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter.dev_clients.yaml @@ -0,0 +1,75 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + creationTimestamp: null + name: clients.jumpstarter.dev +spec: 
+ group: jumpstarter.dev + names: + kind: Client + listKind: ClientList + plural: clients + singular: client + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Client is the Schema for the identities API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClientSpec defines the desired state of Identity + properties: + username: + type: string + type: object + status: + description: ClientStatus defines the observed state of Identity + properties: + credential: + description: Status field for the clients + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + endpoint: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter.dev_exporteraccesspolicies.yaml b/controller/deploy/operator/bundle/manifests/jumpstarter.dev_exporteraccesspolicies.yaml new file mode 100644 index 000000000..d4d27fb04 --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter.dev_exporteraccesspolicies.yaml @@ -0,0 +1,172 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + creationTimestamp: null + name: exporteraccesspolicies.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: ExporterAccessPolicy + listKind: ExporterAccessPolicyList + plural: exporteraccesspolicies + singular: exporteraccesspolicy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ExporterAccessPolicy is the Schema for the exporteraccesspolicies + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExporterAccessPolicySpec defines the desired state of ExporterAccessPolicy. + properties: + exporterSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + policies: + items: + properties: + from: + items: + properties: + clientSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + maximumDuration: + type: string + priority: + type: integer + spotAccess: + type: boolean + type: object + type: array + type: object + status: + description: ExporterAccessPolicyStatus defines the observed state of + ExporterAccessPolicy. + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter.dev_exporters.yaml b/controller/deploy/operator/bundle/manifests/jumpstarter.dev_exporters.yaml new file mode 100644 index 000000000..c111bd68d --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter.dev_exporters.yaml @@ -0,0 +1,166 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + creationTimestamp: null + name: exporters.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Exporter + listKind: ExporterList + plural: exporters + singular: exporter + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Exporter is the Schema for the exporters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExporterSpec defines the desired state of Exporter + properties: + username: + type: string + type: object + status: + description: ExporterStatus defines the observed state of Exporter + properties: + conditions: + description: Exporter status fields + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + credential: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + devices: + items: + properties: + labels: + additionalProperties: + type: string + type: object + parent_uuid: + type: string + uuid: + type: string + type: object + type: array + endpoint: + type: string + lastSeen: + format: date-time + type: string + leaseRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/controller/deploy/operator/bundle/manifests/jumpstarter.dev_leases.yaml b/controller/deploy/operator/bundle/manifests/jumpstarter.dev_leases.yaml new file mode 100644 index 000000000..2dfde9bbd --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/jumpstarter.dev_leases.yaml @@ -0,0 +1,241 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + creationTimestamp: null + name: leases.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Lease + listKind: LeaseList + plural: leases + singular: lease + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ended + name: Ended + type: boolean + - jsonPath: .spec.clientRef.name + name: Client + type: string + - jsonPath: .status.exporterRef.name + name: Exporter + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Lease is the Schema for the exporters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LeaseSpec defines the desired state of Lease + properties: + beginTime: + description: |- + Requested start time. If omitted, lease starts when exporter is acquired. + Immutable after lease starts (cannot change the past). + format: date-time + type: string + clientRef: + description: The client that is requesting the lease + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + duration: + description: |- + Duration of the lease. Must be positive when provided. + Can be omitted (nil) when both BeginTime and EndTime are provided, + in which case it's calculated as EndTime - BeginTime. + type: string + endTime: + description: |- + Requested end time. If specified with BeginTime, Duration is calculated. + Can be updated to extend or shorten active leases. + format: date-time + type: string + release: + description: The release flag requests the controller to end the lease + now + type: boolean + selector: + description: The selector for the exporter to be used + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - clientRef + - selector + type: object + status: + description: LeaseStatus defines the observed state of Lease + properties: + beginTime: + description: |- + If the lease has been acquired an exporter name is assigned + and then it can be used, it will be empty while still pending + format: date-time + type: string + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + endTime: + format: date-time + type: string + ended: + type: boolean + exporterRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + priority: + type: integer + spotAccess: + type: boolean + required: + - ended + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/controller/deploy/operator/bundle/manifests/operator.jumpstarter.dev_jumpstarters.yaml b/controller/deploy/operator/bundle/manifests/operator.jumpstarter.dev_jumpstarters.yaml new file mode 100644 index 000000000..50443c424 --- /dev/null +++ b/controller/deploy/operator/bundle/manifests/operator.jumpstarter.dev_jumpstarters.yaml @@ -0,0 +1,1918 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + creationTimestamp: null + name: jumpstarters.operator.jumpstarter.dev +spec: + group: operator.jumpstarter.dev + names: + kind: Jumpstarter + listKind: JumpstarterList + plural: jumpstarters + singular: jumpstarter + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Jumpstarter is the Schema for the jumpstarters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + JumpstarterSpec defines the desired state of a Jumpstarter deployment. A deployment + can be created in a namespace of the cluster, and that's where all the Jumpstarter + resources and services will reside. + properties: + authentication: + description: |- + Authentication configuration for client and exporter authentication. + Supports multiple authentication methods including internal tokens, Kubernetes tokens, and JWT. + properties: + internal: + description: |- + Internal authentication configuration. + Built-in authenticator that issues tokens for clients and exporters. + This is the simplest authentication method and is enabled by default. + properties: + enabled: + default: true + description: |- + Enable the internal authentication method. + When disabled, clients cannot use internal tokens for authentication. + type: boolean + prefix: + default: 'internal:' + description: |- + Prefix to add to the subject claim of issued tokens. + Helps distinguish internal tokens from other authentication methods. + Example: "internal:" will result in subjects like "internal:user123" + maxLength: 50 + type: string + tokenLifetime: + default: 43800h + description: |- + Token validity duration for issued tokens. + After this duration, tokens expire and must be renewed. + type: string + type: object + jwt: + description: |- + JWT authentication configuration. + Enables authentication using external JWT tokens from OIDC providers. + Supports multiple JWT authenticators for different identity providers. + items: + description: JWTAuthenticator provides the configuration for + a single JWT authenticator. + properties: + claimMappings: + description: claimMappings points claims of a token to be + treated as user attributes. 
+ properties: + extra: + description: |- + extra represents an option for the extra attribute. + expression must produce a string or string array value. + If the value is empty, the extra mapping will not be present. + + hard-coded extra key/value + - key: "foo" + valueExpression: "'bar'" + This will result in an extra attribute - foo: ["bar"] + + hard-coded key, value copying claim value + - key: "foo" + valueExpression: "claims.some_claim" + This will result in an extra attribute - foo: [value of some_claim] + + hard-coded key, value derived from claim value + - key: "admin" + valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""' + This will result in: + - if is_admin claim is present and true, extra attribute - admin: ["true"] + - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added + items: + description: ExtraMapping provides the configuration + for a single extra mapping. + properties: + key: + description: |- + key is a string to use as the extra attribute key. + key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid + subdomain as defined by RFC 1123. All characters trailing the first "/" must + be valid HTTP Path characters as defined by RFC 3986. + key must be lowercase. + Required to be unique. + type: string + valueExpression: + description: |- + valueExpression is a CEL expression to extract extra attribute value. + valueExpression must produce a string or string array value. + "", [], and null values are treated as the extra mapping not being present. + Empty string values contained within a string array are filtered out. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. 
+ + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + required: + - key + - valueExpression + type: object + type: array + groups: + description: |- + groups represents an option for the groups attribute. + The claim's value must be a string or string array claim. + If groups.claim is set, the prefix must be specified (and can be the empty string). + If groups.expression is set, the expression must produce a string or string array value. + "", [], and null values are treated as the group mapping not being present. + properties: + claim: + description: |- + claim is the JWT claim to use. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and prefix. + type: string + prefix: + description: |- + prefix is prepended to claim's value to prevent clashes with existing names. + prefix needs to be set if claim is set and can be the empty string. + Mutually exclusive with expression. + type: string + type: object + uid: + description: |- + uid represents an option for the uid attribute. + Claim must be a singular string claim. + If uid.expression is set, the expression must produce a string value. + properties: + claim: + description: |- + claim is the JWT claim to use. + Either claim or expression must be set. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. 
+ + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim. + type: string + type: object + username: + description: |- + username represents an option for the username attribute. + The claim's value must be a singular string. + Same as the --oidc-username-claim and --oidc-username-prefix flags. + If username.expression is set, the expression must produce a string value. + If username.expression uses 'claims.email', then 'claims.email_verified' must be used in + username.expression or extra[*].valueExpression or claimValidationRules[*].expression. + An example claim validation rule expression that matches the validation automatically + applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true) == true'. By explicitly comparing + the value to true, we let type-checking see the result will be a boolean, and to make sure a non-boolean email_verified + claim will be caught at runtime. + + In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set, + the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly. + For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config. + For prefix: + (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config, + set username.prefix="" + (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "#". 
For the same + behavior using authentication config, set username.prefix="#" + (3) --oidc-username-prefix="". For the same behavior using authentication config, set username.prefix="" + properties: + claim: + description: |- + claim is the JWT claim to use. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and prefix. + type: string + prefix: + description: |- + prefix is prepended to claim's value to prevent clashes with existing names. + prefix needs to be set if claim is set and can be the empty string. + Mutually exclusive with expression. + type: string + type: object + required: + - username + type: object + claimValidationRules: + description: claimValidationRules are rules that are applied + to validate token claims to authenticate users. + items: + description: ClaimValidationRule provides the configuration + for a single claim validation rule. + properties: + claim: + description: |- + claim is the name of a required claim. + Same as --oidc-required-claim flag. + Only string claim keys are supported. + Mutually exclusive with expression and message. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must produce a boolean. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. 
+ Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + Must return true for the validation to pass. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and requiredValue. + type: string + message: + description: |- + message customizes the returned error message when expression returns false. + message is a literal string. + Mutually exclusive with claim and requiredValue. + type: string + requiredValue: + description: |- + requiredValue is the value of a required claim. + Same as --oidc-required-claim flag. + Only string claim values are supported. + If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + Mutually exclusive with expression and message. + type: string + type: object + type: array + issuer: + description: issuer contains the basic OIDC provider connection + options. + properties: + audienceMatchPolicy: + description: |- + audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT. + Allowed values are: + 1. "MatchAny" when multiple audiences are specified and + 2. empty (or unset) or "MatchAny" when a single audience is specified. + + - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both). + + - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others). + + For more nuanced audience validation, use claimValidationRules. + example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match. 
+ type: string + audiences: + description: |- + audiences is the set of acceptable audiences the JWT must be issued to. + At least one of the entries must match the "aud" claim in presented JWTs. + Same value as the --oidc-client-id flag (though this field supports an array). + Required to be non-empty. + items: + type: string + type: array + certificateAuthority: + description: |- + certificateAuthority contains PEM-encoded certificate authority certificates + used to validate the connection when fetching discovery information. + If unset, the system verifier is used. + Same value as the content of the file referenced by the --oidc-ca-file flag. + type: string + discoveryURL: + description: |- + discoveryURL, if specified, overrides the URL used to fetch discovery + information instead of using "{url}/.well-known/openid-configuration". + The exact value specified is used, so "/.well-known/openid-configuration" + must be included in discoveryURL if needed. + + The "issuer" field in the fetched discovery information must match the "issuer.url" field + in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT. + This is for scenarios where the well-known and jwks endpoints are hosted at a different + location than the issuer (such as locally in the cluster). + + Example: + A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace' + and discovery information is available at '/.well-known/openid-configuration'. + discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration" + certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate + must be set to 'oidc.oidc-namespace'. + + curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field) + { + issuer: "https://oidc.example.com" (.url field) + } + + discoveryURL must be different from url. + Required to be unique across all JWT authenticators. 
+ Note that egress selection configuration is not used for this network connection. + type: string + url: + description: |- + url points to the issuer URL in a format https://url or https://url/path. + This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + Same value as the --oidc-issuer-url flag. + Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL. + Required to be unique across all JWT authenticators. + Note that egress selection configuration is not used for this network connection. + type: string + required: + - audiences + - url + type: object + userValidationRules: + description: |- + userValidationRules are rules that are applied to final user before completing authentication. + These allow invariants to be applied to incoming identities such as preventing the + use of the system: prefix that is commonly used by Kubernetes components. + The validation rules are logically ANDed together and must all return true for the validation to pass. + items: + description: UserValidationRule provides the configuration + for a single user info validation rule. + properties: + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must return true for the validation to pass. + + CEL expressions have access to the contents of UserInfo, organized into CEL variable: + - 'user' - authentication.k8s.io/v1, Kind=UserInfo object + Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. + API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + message: + description: |- + message customizes the returned error message when rule returns false. + message is a literal string. 
+ type: string + required: + - expression + type: object + type: array + required: + - claimMappings + - issuer + type: object + type: array + k8s: + description: |- + Kubernetes authentication configuration. + Enables authentication using Kubernetes service account tokens. + Useful for integrating with existing Kubernetes RBAC policies. + properties: + enabled: + default: false + description: |- + Enable Kubernetes authentication. + When enabled, clients can authenticate using Kubernetes service account tokens. + type: boolean + type: object + type: object + baseDomain: + description: |- + Base domain used to construct FQDNs for all service endpoints. + This domain will be used to generate the default hostnames for Routes, Ingresses, and certificates. + Example: "example.com" will generate endpoints like "grpc.example.com", "router.example.com" + pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + controller: + default: {} + description: |- + Controller configuration for the main Jumpstarter API and gRPC services. + The controller handles gRPC and REST API requests from clients and exporters. + properties: + authentication: + description: |- + Authentication configuration for client and exporter authentication. + Configures how clients and exporters can authenticate with Jumpstarter. + Supports multiple authentication methods including internal tokens, Kubernetes tokens, and JWT. + properties: + internal: + description: |- + Internal authentication configuration. + Built-in authenticator that issues tokens for clients and exporters. + This is the simplest authentication method and is enabled by default. + properties: + enabled: + default: true + description: |- + Enable the internal authentication method. + When disabled, clients cannot use internal tokens for authentication. + type: boolean + prefix: + default: 'internal:' + description: |- + Prefix to add to the subject claim of issued tokens. 
+ Helps distinguish internal tokens from other authentication methods. + Example: "internal:" will result in subjects like "internal:user123" + maxLength: 50 + type: string + tokenLifetime: + default: 43800h + description: |- + Token validity duration for issued tokens. + After this duration, tokens expire and must be renewed. + type: string + type: object + jwt: + description: |- + JWT authentication configuration. + Enables authentication using external JWT tokens from OIDC providers. + Supports multiple JWT authenticators for different identity providers. + items: + description: JWTAuthenticator provides the configuration + for a single JWT authenticator. + properties: + claimMappings: + description: claimMappings points claims of a token + to be treated as user attributes. + properties: + extra: + description: |- + extra represents an option for the extra attribute. + expression must produce a string or string array value. + If the value is empty, the extra mapping will not be present. + + hard-coded extra key/value + - key: "foo" + valueExpression: "'bar'" + This will result in an extra attribute - foo: ["bar"] + + hard-coded key, value copying claim value + - key: "foo" + valueExpression: "claims.some_claim" + This will result in an extra attribute - foo: [value of some_claim] + + hard-coded key, value derived from claim value + - key: "admin" + valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""' + This will result in: + - if is_admin claim is present and true, extra attribute - admin: ["true"] + - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added + items: + description: ExtraMapping provides the configuration + for a single extra mapping. + properties: + key: + description: |- + key is a string to use as the extra attribute key. + key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid + subdomain as defined by RFC 1123. 
All characters trailing the first "/" must + be valid HTTP Path characters as defined by RFC 3986. + key must be lowercase. + Required to be unique. + type: string + valueExpression: + description: |- + valueExpression is a CEL expression to extract extra attribute value. + valueExpression must produce a string or string array value. + "", [], and null values are treated as the extra mapping not being present. + Empty string values contained within a string array are filtered out. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + required: + - key + - valueExpression + type: object + type: array + groups: + description: |- + groups represents an option for the groups attribute. + The claim's value must be a string or string array claim. + If groups.claim is set, the prefix must be specified (and can be the empty string). + If groups.expression is set, the expression must produce a string or string array value. + "", [], and null values are treated as the group mapping not being present. + properties: + claim: + description: |- + claim is the JWT claim to use. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and prefix. 
+ type: string + prefix: + description: |- + prefix is prepended to claim's value to prevent clashes with existing names. + prefix needs to be set if claim is set and can be the empty string. + Mutually exclusive with expression. + type: string + type: object + uid: + description: |- + uid represents an option for the uid attribute. + Claim must be a singular string claim. + If uid.expression is set, the expression must produce a string value. + properties: + claim: + description: |- + claim is the JWT claim to use. + Either claim or expression must be set. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim. + type: string + type: object + username: + description: |- + username represents an option for the username attribute. + The claim's value must be a singular string. + Same as the --oidc-username-claim and --oidc-username-prefix flags. + If username.expression is set, the expression must produce a string value. + If username.expression uses 'claims.email', then 'claims.email_verified' must be used in + username.expression or extra[*].valueExpression or claimValidationRules[*].expression. + An example claim validation rule expression that matches the validation automatically + applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true) == true'. By explicitly comparing + the value to true, we let type-checking see the result will be a boolean, and to make sure a non-boolean email_verified + claim will be caught at runtime. 
+ + In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set, + the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly. + For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config. + For prefix: + (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config, + set username.prefix="" + (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "#". For the same + behavior using authentication config, set username.prefix="#" + (3) --oidc-username-prefix="". For the same behavior using authentication config, set username.prefix="" + properties: + claim: + description: |- + claim is the JWT claim to use. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and prefix. + type: string + prefix: + description: |- + prefix is prepended to claim's value to prevent clashes with existing names. + prefix needs to be set if claim is set and can be the empty string. + Mutually exclusive with expression. + type: string + type: object + required: + - username + type: object + claimValidationRules: + description: claimValidationRules are rules that are + applied to validate token claims to authenticate users. 
+ items: + description: ClaimValidationRule provides the configuration + for a single claim validation rule. + properties: + claim: + description: |- + claim is the name of a required claim. + Same as --oidc-required-claim flag. + Only string claim keys are supported. + Mutually exclusive with expression and message. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must produce a boolean. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + Must return true for the validation to pass. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and requiredValue. + type: string + message: + description: |- + message customizes the returned error message when expression returns false. + message is a literal string. + Mutually exclusive with claim and requiredValue. + type: string + requiredValue: + description: |- + requiredValue is the value of a required claim. + Same as --oidc-required-claim flag. + Only string claim values are supported. + If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + Mutually exclusive with expression and message. + type: string + type: object + type: array + issuer: + description: issuer contains the basic OIDC provider + connection options. + properties: + audienceMatchPolicy: + description: |- + audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT. + Allowed values are: + 1. "MatchAny" when multiple audiences are specified and + 2. empty (or unset) or "MatchAny" when a single audience is specified. 
+ + - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both). + + - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others). + + For more nuanced audience validation, use claimValidationRules. + example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match. + type: string + audiences: + description: |- + audiences is the set of acceptable audiences the JWT must be issued to. + At least one of the entries must match the "aud" claim in presented JWTs. + Same value as the --oidc-client-id flag (though this field supports an array). + Required to be non-empty. + items: + type: string + type: array + certificateAuthority: + description: |- + certificateAuthority contains PEM-encoded certificate authority certificates + used to validate the connection when fetching discovery information. + If unset, the system verifier is used. + Same value as the content of the file referenced by the --oidc-ca-file flag. + type: string + discoveryURL: + description: |- + discoveryURL, if specified, overrides the URL used to fetch discovery + information instead of using "{url}/.well-known/openid-configuration". + The exact value specified is used, so "/.well-known/openid-configuration" + must be included in discoveryURL if needed. + + The "issuer" field in the fetched discovery information must match the "issuer.url" field + in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT. + This is for scenarios where the well-known and jwks endpoints are hosted at a different + location than the issuer (such as locally in the cluster). 
+ + Example: + A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace' + and discovery information is available at '/.well-known/openid-configuration'. + discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration" + certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate + must be set to 'oidc.oidc-namespace'. + + curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field) + { + issuer: "https://oidc.example.com" (.url field) + } + + discoveryURL must be different from url. + Required to be unique across all JWT authenticators. + Note that egress selection configuration is not used for this network connection. + type: string + url: + description: |- + url points to the issuer URL in a format https://url or https://url/path. + This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + Same value as the --oidc-issuer-url flag. + Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL. + Required to be unique across all JWT authenticators. + Note that egress selection configuration is not used for this network connection. + type: string + required: + - audiences + - url + type: object + userValidationRules: + description: |- + userValidationRules are rules that are applied to final user before completing authentication. + These allow invariants to be applied to incoming identities such as preventing the + use of the system: prefix that is commonly used by Kubernetes components. + The validation rules are logically ANDed together and must all return true for the validation to pass. + items: + description: UserValidationRule provides the configuration + for a single user info validation rule. + properties: + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must return true for the validation to pass. 
+ + CEL expressions have access to the contents of UserInfo, organized into CEL variable: + - 'user' - authentication.k8s.io/v1, Kind=UserInfo object + Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. + API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + message: + description: |- + message customizes the returned error message when rule returns false. + message is a literal string. + type: string + required: + - expression + type: object + type: array + required: + - claimMappings + - issuer + type: object + type: array + k8s: + description: |- + Kubernetes authentication configuration. + Enables authentication using Kubernetes service account tokens. + Useful for integrating with existing Kubernetes RBAC policies. + properties: + enabled: + default: false + description: |- + Enable Kubernetes authentication. + When enabled, clients can authenticate using Kubernetes service account tokens. + type: boolean + type: object + type: object + exporterOptions: + description: |- + Exporter options configuration. + Controls how exporters connect and behave when communicating with the controller. + properties: + offlineTimeout: + default: 180s + description: |- + Offline timeout duration for exporters. + After this duration without communication, an exporter is considered offline. + This drives the online/offline status field of exporters, and offline exporters + won't be considered for leases. + type: string + type: object + grpc: + description: |- + gRPC configuration for controller endpoints. + Defines how controller gRPC services are exposed and configured. + properties: + endpoints: + description: |- + List of gRPC endpoints to expose. 
+ Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + based on your cluster setup. Example: Use Route for OpenShift, Ingress for standard Kubernetes. + items: + description: |- + Endpoint defines a single endpoint configuration. + An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. + Multiple methods can be configured simultaneously for the same address. + properties: + address: + description: |- + Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. + When optional, the address is used for certificate generation and DNS resolution. + Supports templating with $(replica) for replica-specific addresses. + Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + pattern: ^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$ + type: string + clusterIP: + description: |- + ClusterIP configuration for internal service access. + Creates a ClusterIP service for this endpoint. + Useful for internal service-to-service communication or when + using a different method to expose the service externally. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ClusterIP service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the ClusterIP service for this endpoint. + When disabled, no ClusterIP service will be created for this endpoint. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the ClusterIP service. + Useful for monitoring, cost allocation, and resource organization. 
+ type: object + type: object + ingress: + description: |- + Ingress configuration for standard Kubernetes clusters. + Creates an Ingress resource for this endpoint. + Requires an ingress controller to be installed. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the Kubernetes Ingress resource. + Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. + type: object + class: + default: default + description: |- + Ingress class name for the Kubernetes Ingress. + Specifies which ingress controller should handle this ingress. + type: string + enabled: + description: |- + Enable the Kubernetes Ingress for this endpoint. + When disabled, no Ingress resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the Kubernetes Ingress resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + loadBalancer: + description: |- + LoadBalancer configuration for cloud environments. + Creates a LoadBalancer service for this endpoint. + Requires cloud provider support for LoadBalancer services. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the LoadBalancer service. + Useful for configuring cloud provider-specific load balancer options. + Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + type: object + enabled: + description: |- + Enable the LoadBalancer service for this endpoint. + When disabled, no LoadBalancer service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the LoadBalancer service. 
+ Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + Port number for the LoadBalancer service. + Must be a valid port number (1-65535). + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodeport: + description: |- + NodePort configuration for direct node access. + Exposes the service on a specific port on each node. + Useful for bare-metal or simple cluster setups. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the NodePort service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the NodePort service for this endpoint. + When disabled, no NodePort service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the NodePort service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + NodePort port number to expose on each node. + Must be in the range 30000-32767 for most Kubernetes clusters. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: |- + Route configuration for OpenShift clusters. + Creates an OpenShift Route resource for this endpoint. + Only applicable in OpenShift environments. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the OpenShift Route resource. + Useful for configuring route-specific behavior and TLS settings. + type: object + enabled: + description: |- + Enable the OpenShift Route for this endpoint. + When disabled, no Route resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. 
+ type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the OpenShift Route resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + type: object + type: array + keepalive: + description: |- + Keepalive configuration for gRPC connections. + Controls connection health checks and idle connection management. + Helps maintain stable connections in load-balanced environments. + properties: + intervalTime: + default: 10s + description: |- + Interval between keepalive pings. + How often to send keepalive pings to check connection health. This is important + to keep TCP gRPC connections alive when traversing load balancers and proxies. + type: string + maxConnectionAge: + description: |- + Maximum age of a connection before it is closed and recreated. + Helps prevent issues with long-lived connections. It defaults to infinity. + type: string + maxConnectionAgeGrace: + description: |- + Grace period for closing connections that exceed MaxConnectionAge. + Allows ongoing RPCs to complete before closing the connection. + type: string + maxConnectionIdle: + description: |- + Maximum time a connection can remain idle before being closed. + It defaults to infinity. + type: string + minTime: + default: 1s + description: |- + Minimum time between keepalives that the connection will accept, under this threshold + the other side will get a GOAWAY signal. + Prevents excessive keepalive traffic on the network. + type: string + permitWithoutStream: + default: true + description: |- + Allow keepalive pings even when there are no active RPC streams. + Useful for detecting connection issues in idle connections. + This is important to keep TCP gRPC connections alive when traversing + load balancers and proxies. + type: boolean + timeout: + default: 180s + description: |- + Timeout for keepalive ping acknowledgment. 
+ If a ping is not acknowledged within this time, the connection is considered broken. + The default is high to avoid issues when the network on a exporter is overloaded, i.e. + during flashing. + type: string + type: object + tls: + description: |- + TLS configuration for secure gRPC communication. + Requires a Kubernetes secret containing the TLS certificate and private key. + If useCertManager is enabled, this secret will be automatically created. + See also: spec.useCertManager for automatic certificate management. + properties: + certSecret: + description: |- + Name of the Kubernetes secret containing the TLS certificate and private key. + The secret must contain 'tls.crt' and 'tls.key' keys. + If useCertManager is enabled, this secret will be automatically created. + pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + type: object + type: object + image: + default: quay.io/jumpstarter-dev/jumpstarter-controller:latest + description: |- + Container image for the controller pods in 'registry/repository/image:tag' format. + If not specified, defaults to the latest stable version of the Jumpstarter controller. + type: string + imagePullPolicy: + default: IfNotPresent + description: |- + Image pull policy for the controller container. + Controls when the container image should be pulled from the registry. + enum: + - Always + - IfNotPresent + - Never + type: string + replicas: + default: 2 + description: |- + Number of controller replicas to run. + Must be a positive integer. Minimum recommended value is 2 for high availability. + format: int32 + minimum: 1 + type: integer + resources: + description: |- + Resource requirements for controller pods. + Defines CPU and memory requests and limits for each controller pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. 
+ + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restApi: + description: |- + REST API configuration for HTTP-based clients. + Enables non-gRPC clients to interact with Jumpstarter for listing leases, + managing exporters, and creating new leases. Use this when you need HTTP/JSON access. 
+ properties: + endpoints: + description: |- + List of REST API endpoints to expose. + Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + based on your cluster setup. + items: + description: |- + Endpoint defines a single endpoint configuration. + An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. + Multiple methods can be configured simultaneously for the same address. + properties: + address: + description: |- + Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. + When optional, the address is used for certificate generation and DNS resolution. + Supports templating with $(replica) for replica-specific addresses. + Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + pattern: ^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$ + type: string + clusterIP: + description: |- + ClusterIP configuration for internal service access. + Creates a ClusterIP service for this endpoint. + Useful for internal service-to-service communication or when + using a different method to expose the service externally. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ClusterIP service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the ClusterIP service for this endpoint. + When disabled, no ClusterIP service will be created for this endpoint. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the ClusterIP service. + Useful for monitoring, cost allocation, and resource organization. 
+ type: object + type: object + ingress: + description: |- + Ingress configuration for standard Kubernetes clusters. + Creates an Ingress resource for this endpoint. + Requires an ingress controller to be installed. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the Kubernetes Ingress resource. + Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. + type: object + class: + default: default + description: |- + Ingress class name for the Kubernetes Ingress. + Specifies which ingress controller should handle this ingress. + type: string + enabled: + description: |- + Enable the Kubernetes Ingress for this endpoint. + When disabled, no Ingress resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the Kubernetes Ingress resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + loadBalancer: + description: |- + LoadBalancer configuration for cloud environments. + Creates a LoadBalancer service for this endpoint. + Requires cloud provider support for LoadBalancer services. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the LoadBalancer service. + Useful for configuring cloud provider-specific load balancer options. + Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + type: object + enabled: + description: |- + Enable the LoadBalancer service for this endpoint. + When disabled, no LoadBalancer service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the LoadBalancer service. 
+ Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + Port number for the LoadBalancer service. + Must be a valid port number (1-65535). + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodeport: + description: |- + NodePort configuration for direct node access. + Exposes the service on a specific port on each node. + Useful for bare-metal or simple cluster setups. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the NodePort service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the NodePort service for this endpoint. + When disabled, no NodePort service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the NodePort service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + NodePort port number to expose on each node. + Must be in the range 30000-32767 for most Kubernetes clusters. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: |- + Route configuration for OpenShift clusters. + Creates an OpenShift Route resource for this endpoint. + Only applicable in OpenShift environments. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the OpenShift Route resource. + Useful for configuring route-specific behavior and TLS settings. + type: object + enabled: + description: |- + Enable the OpenShift Route for this endpoint. + When disabled, no Route resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. 
+ type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the OpenShift Route resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + type: object + type: array + tls: + description: |- + TLS configuration for secure HTTP communication. + Requires a Kubernetes secret containing the TLS certificate and private key. + properties: + certSecret: + description: |- + Name of the Kubernetes secret containing the TLS certificate and private key. + The secret must contain 'tls.crt' and 'tls.key' keys. + If useCertManager is enabled, this secret will be automatically created. + pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + type: object + type: object + type: object + routers: + default: {} + description: |- + Router configuration for the Jumpstarter router service. + Routers handle gRPC traffic routing and load balancing. + properties: + grpc: + description: |- + gRPC configuration for router endpoints. + Defines how router gRPC services are exposed and configured. + properties: + endpoints: + description: |- + List of gRPC endpoints to expose. + Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + based on your cluster setup. Example: Use Route for OpenShift, Ingress for standard Kubernetes. + items: + description: |- + Endpoint defines a single endpoint configuration. + An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. + Multiple methods can be configured simultaneously for the same address. + properties: + address: + description: |- + Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. + When optional, the address is used for certificate generation and DNS resolution. 
+ Supports templating with $(replica) for replica-specific addresses. + Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + pattern: ^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$ + type: string + clusterIP: + description: |- + ClusterIP configuration for internal service access. + Creates a ClusterIP service for this endpoint. + Useful for internal service-to-service communication or when + using a different method to expose the service externally. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ClusterIP service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the ClusterIP service for this endpoint. + When disabled, no ClusterIP service will be created for this endpoint. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the ClusterIP service. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + ingress: + description: |- + Ingress configuration for standard Kubernetes clusters. + Creates an Ingress resource for this endpoint. + Requires an ingress controller to be installed. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the Kubernetes Ingress resource. + Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. + type: object + class: + default: default + description: |- + Ingress class name for the Kubernetes Ingress. + Specifies which ingress controller should handle this ingress. + type: string + enabled: + description: |- + Enable the Kubernetes Ingress for this endpoint. + When disabled, no Ingress resource will be created for this endpoint. 
+ When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the Kubernetes Ingress resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + loadBalancer: + description: |- + LoadBalancer configuration for cloud environments. + Creates a LoadBalancer service for this endpoint. + Requires cloud provider support for LoadBalancer services. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the LoadBalancer service. + Useful for configuring cloud provider-specific load balancer options. + Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + type: object + enabled: + description: |- + Enable the LoadBalancer service for this endpoint. + When disabled, no LoadBalancer service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the LoadBalancer service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + Port number for the LoadBalancer service. + Must be a valid port number (1-65535). + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodeport: + description: |- + NodePort configuration for direct node access. + Exposes the service on a specific port on each node. + Useful for bare-metal or simple cluster setups. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the NodePort service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the NodePort service for this endpoint. 
+ When disabled, no NodePort service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the NodePort service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + NodePort port number to expose on each node. + Must be in the range 30000-32767 for most Kubernetes clusters. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: |- + Route configuration for OpenShift clusters. + Creates an OpenShift Route resource for this endpoint. + Only applicable in OpenShift environments. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the OpenShift Route resource. + Useful for configuring route-specific behavior and TLS settings. + type: object + enabled: + description: |- + Enable the OpenShift Route for this endpoint. + When disabled, no Route resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the OpenShift Route resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + type: object + type: array + keepalive: + description: |- + Keepalive configuration for gRPC connections. + Controls connection health checks and idle connection management. + Helps maintain stable connections in load-balanced environments. + properties: + intervalTime: + default: 10s + description: |- + Interval between keepalive pings. + How often to send keepalive pings to check connection health. This is important + to keep TCP gRPC connections alive when traversing load balancers and proxies. 
+ type: string + maxConnectionAge: + description: |- + Maximum age of a connection before it is closed and recreated. + Helps prevent issues with long-lived connections. It defaults to infinity. + type: string + maxConnectionAgeGrace: + description: |- + Grace period for closing connections that exceed MaxConnectionAge. + Allows ongoing RPCs to complete before closing the connection. + type: string + maxConnectionIdle: + description: |- + Maximum time a connection can remain idle before being closed. + It defaults to infinity. + type: string + minTime: + default: 1s + description: |- + Minimum time between keepalives that the connection will accept, under this threshold + the other side will get a GOAWAY signal. + Prevents excessive keepalive traffic on the network. + type: string + permitWithoutStream: + default: true + description: |- + Allow keepalive pings even when there are no active RPC streams. + Useful for detecting connection issues in idle connections. + This is important to keep TCP gRPC connections alive when traversing + load balancers and proxies. + type: boolean + timeout: + default: 180s + description: |- + Timeout for keepalive ping acknowledgment. + If a ping is not acknowledged within this time, the connection is considered broken. + The default is high to avoid issues when the network on a exporter is overloaded, i.e. + during flashing. + type: string + type: object + tls: + description: |- + TLS configuration for secure gRPC communication. + Requires a Kubernetes secret containing the TLS certificate and private key. + If useCertManager is enabled, this secret will be automatically created. + See also: spec.useCertManager for automatic certificate management. + properties: + certSecret: + description: |- + Name of the Kubernetes secret containing the TLS certificate and private key. + The secret must contain 'tls.crt' and 'tls.key' keys. + If useCertManager is enabled, this secret will be automatically created. 
+ pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + type: object + type: object + image: + default: quay.io/jumpstarter-dev/jumpstarter-controller:latest + description: |- + Container image for the router pods in 'registry/repository/image:tag' format. + If not specified, defaults to the latest stable version of the Jumpstarter router. + type: string + imagePullPolicy: + default: IfNotPresent + description: |- + Image pull policy for the router container. + Controls when the container image should be pulled from the registry. + enum: + - Always + - IfNotPresent + - Never + type: string + replicas: + default: 3 + description: |- + Number of router replicas to run. + Must be a positive integer. Minimum recommended value is 3 for high availability. + format: int32 + minimum: 1 + type: integer + resources: + description: |- + Resource requirements for router pods. + Defines CPU and memory requests and limits for each router pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + topologySpreadConstraints: + description: |- + Topology spread constraints for router pod distribution. + Ensures router pods are distributed evenly across nodes and zones. + Useful for high availability and fault tolerance. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. 
+ + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. 
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. 
If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + useCertManager: + default: true + description: |- + Enable automatic TLS certificate management using cert-manager. + When enabled, jumpstarter will interact with cert-manager to automatically provision + and renew TLS certificates for all endpoints. Requires cert-manager to be installed in the cluster. + type: boolean + type: object + status: + description: |- + JumpstarterStatus defines the observed state of Jumpstarter. + This field is currently empty but can be extended to include status information + such as deployment status, endpoint URLs, and health information. 
+ type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/controller/deploy/operator/bundle/metadata/annotations.yaml b/controller/deploy/operator/bundle/metadata/annotations.yaml new file mode 100644 index 000000000..bf1f6fb85 --- /dev/null +++ b/controller/deploy/operator/bundle/metadata/annotations.yaml @@ -0,0 +1,14 @@ +annotations: + # Core bundle annotations. + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: jumpstarter-operator + operators.operatorframework.io.bundle.channels.v1: alpha + operators.operatorframework.io.metrics.builder: operator-sdk-v1.41.1 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4 + + # Annotations for testing. 
+ operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ diff --git a/controller/deploy/operator/bundle/tests/scorecard/config.yaml b/controller/deploy/operator/bundle/tests/scorecard/config.yaml new file mode 100644 index 000000000..6ffe8227f --- /dev/null +++ b/controller/deploy/operator/bundle/tests/scorecard/config.yaml @@ -0,0 +1,70 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: basic + test: basic-check-spec-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-bundle-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-crds-have-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-crds-have-resources-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-spec-descriptors-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-status-descriptors-test + storage: + spec: + mountPath: {} +storage: + spec: + mountPath: {} diff --git a/controller/deploy/operator/cmd/main.go b/controller/deploy/operator/cmd/main.go new file mode 100644 index 
000000000..dd8e2a320 --- /dev/null +++ b/controller/deploy/operator/cmd/main.go @@ -0,0 +1,262 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "crypto/tls" + "flag" + "os" + "path/filepath" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + _ "k8s.io/client-go/plugin/pkg/client/auth" + + routev1 "github.com/openshift/api/route/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/controller/jumpstarter" + "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/controller/jumpstarter/endpoints" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + + // Version information - set via ldflags at build time + version = "dev" + gitCommit = 
"unknown" + buildDate = "unknown" +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(operatorv1alpha1.AddToScheme(scheme)) + + // Register OpenShift Route API + utilruntime.Must(routev1.Install(scheme)) + // +kubebuilder:scaffold:scheme +} + +// nolint:gocyclo +func main() { + var metricsAddr string + var metricsCertPath, metricsCertName, metricsCertKey string + var webhookCertPath, webhookCertName, webhookCertKey string + var enableLeaderElection bool + var probeAddr string + var secureMetrics bool + var enableHTTP2 bool + var tlsOpts []func(*tls.Config) + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&secureMetrics, "metrics-secure", true, + "If set, the metrics endpoint is served securely via HTTPS. 
Use --metrics-secure=false to use HTTP instead.") + flag.StringVar(&webhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.") + flag.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.") + flag.StringVar(&webhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.") + flag.StringVar(&metricsCertPath, "metrics-cert-path", "", + "The directory that contains the metrics server certificate.") + flag.StringVar(&metricsCertName, "metrics-cert-name", "tls.crt", "The name of the metrics server certificate file.") + flag.StringVar(&metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.") + flag.BoolVar(&enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + // Print version information + setupLog.Info("Jumpstarter Operator starting", + "version", version, + "gitCommit", gitCommit, + "buildDate", buildDate, + ) + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. 
For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + + // Create watchers for metrics and webhooks certificates + var metricsCertWatcher, webhookCertWatcher *certwatcher.CertWatcher + + // Initial webhook TLS options + webhookTLSOpts := tlsOpts + + if len(webhookCertPath) > 0 { + setupLog.Info("Initializing webhook certificate watcher using provided certificates", + "webhook-cert-path", webhookCertPath, "webhook-cert-name", webhookCertName, "webhook-cert-key", webhookCertKey) + + var err error + webhookCertWatcher, err = certwatcher.New( + filepath.Join(webhookCertPath, webhookCertName), + filepath.Join(webhookCertPath, webhookCertKey), + ) + if err != nil { + setupLog.Error(err, "Failed to initialize webhook certificate watcher") + os.Exit(1) + } + + webhookTLSOpts = append(webhookTLSOpts, func(config *tls.Config) { + config.GetCertificate = webhookCertWatcher.GetCertificate + }) + } + + webhookServer := webhook.NewServer(webhook.Options{ + TLSOpts: webhookTLSOpts, + }) + + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + metricsServerOptions := metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: secureMetrics, + TLSOpts: tlsOpts, + } + + if secureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. 
More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/filters#WithAuthenticationAndAuthorization + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + + // If the certificate is not specified, controller-runtime will automatically + // generate self-signed certificates for the metrics server. While convenient for development and testing, + // this setup is not recommended for production. + // + // TODO(user): If you enable certManager, uncomment the following lines: + // - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates + // managed by cert-manager for the metrics server. + // - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification. + if len(metricsCertPath) > 0 { + setupLog.Info("Initializing metrics certificate watcher using provided certificates", + "metrics-cert-path", metricsCertPath, "metrics-cert-name", metricsCertName, "metrics-cert-key", metricsCertKey) + + var err error + metricsCertWatcher, err = certwatcher.New( + filepath.Join(metricsCertPath, metricsCertName), + filepath.Join(metricsCertPath, metricsCertKey), + ) + if err != nil { + setupLog.Error(err, "to initialize metrics certificate watcher", "error", err) + os.Exit(1) + } + + metricsServerOptions.TLSOpts = append(metricsServerOptions.TLSOpts, func(config *tls.Config) { + config.GetCertificate = metricsCertWatcher.GetCertificate + }) + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: metricsServerOptions, + WebhookServer: webhookServer, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "88ed63ae.jumpstarter.dev", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. 
Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. + // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err := (&jumpstarter.JumpstarterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + EndpointReconciler: endpoints.NewReconciler(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig()), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Jumpstarter") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + + if metricsCertWatcher != nil { + setupLog.Info("Adding metrics certificate watcher to manager") + if err := mgr.Add(metricsCertWatcher); err != nil { + setupLog.Error(err, "unable to add metrics certificate watcher to manager") + os.Exit(1) + } + } + + if webhookCertWatcher != nil { + setupLog.Info("Adding webhook certificate watcher to manager") + if err := mgr.Add(webhookCertWatcher); err != nil { + setupLog.Error(err, "unable to add webhook certificate watcher to manager") + os.Exit(1) + } + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/controller/deploy/operator/config/crd/bases/jumpstarter.dev_clients.yaml 
b/controller/deploy/operator/config/crd/bases/jumpstarter.dev_clients.yaml new file mode 100644 index 000000000..d9dd6d0cb --- /dev/null +++ b/controller/deploy/operator/config/crd/bases/jumpstarter.dev_clients.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: clients.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Client + listKind: ClientList + plural: clients + singular: client + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Client is the Schema for the identities API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClientSpec defines the desired state of Identity + properties: + username: + type: string + type: object + status: + description: ClientStatus defines the observed state of Identity + properties: + credential: + description: Status field for the clients + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + endpoint: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/deploy/operator/config/crd/bases/jumpstarter.dev_exporteraccesspolicies.yaml b/controller/deploy/operator/config/crd/bases/jumpstarter.dev_exporteraccesspolicies.yaml new file mode 100644 index 000000000..ec1b7878c --- /dev/null +++ b/controller/deploy/operator/config/crd/bases/jumpstarter.dev_exporteraccesspolicies.yaml @@ -0,0 +1,166 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: exporteraccesspolicies.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: ExporterAccessPolicy + listKind: ExporterAccessPolicyList + plural: exporteraccesspolicies + singular: exporteraccesspolicy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ExporterAccessPolicy is the Schema for the exporteraccesspolicies + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExporterAccessPolicySpec defines the desired state of ExporterAccessPolicy. 
+ properties: + exporterSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + policies: + items: + properties: + from: + items: + properties: + clientSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + maximumDuration: + type: string + priority: + type: integer + spotAccess: + type: boolean + type: object + type: array + type: object + status: + description: ExporterAccessPolicyStatus defines the observed state of + ExporterAccessPolicy. 
+ type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/deploy/operator/config/crd/bases/jumpstarter.dev_exporters.yaml b/controller/deploy/operator/config/crd/bases/jumpstarter.dev_exporters.yaml new file mode 100644 index 000000000..931c28b07 --- /dev/null +++ b/controller/deploy/operator/config/crd/bases/jumpstarter.dev_exporters.yaml @@ -0,0 +1,160 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: exporters.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Exporter + listKind: ExporterList + plural: exporters + singular: exporter + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Exporter is the Schema for the exporters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExporterSpec defines the desired state of Exporter + properties: + username: + type: string + type: object + status: + description: ExporterStatus defines the observed state of Exporter + properties: + conditions: + description: Exporter status fields + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + credential: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + devices: + items: + properties: + labels: + additionalProperties: + type: string + type: object + parent_uuid: + type: string + uuid: + type: string + type: object + type: array + endpoint: + type: string + lastSeen: + format: date-time + type: string + leaseRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/deploy/operator/config/crd/bases/jumpstarter.dev_leases.yaml b/controller/deploy/operator/config/crd/bases/jumpstarter.dev_leases.yaml new file mode 100644 index 000000000..9aafc8591 --- /dev/null +++ b/controller/deploy/operator/config/crd/bases/jumpstarter.dev_leases.yaml @@ -0,0 +1,235 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: leases.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Lease + listKind: LeaseList + plural: leases + singular: lease + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ended + name: Ended + type: boolean + - jsonPath: .spec.clientRef.name + name: Client + type: string + - jsonPath: .status.exporterRef.name + name: Exporter + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Lease is the Schema for the exporters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LeaseSpec defines the desired state of Lease + properties: + beginTime: + description: |- + Requested start time. If omitted, lease starts when exporter is acquired. + Immutable after lease starts (cannot change the past). + format: date-time + type: string + clientRef: + description: The client that is requesting the lease + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + duration: + description: |- + Duration of the lease. Must be positive when provided. + Can be omitted (nil) when both BeginTime and EndTime are provided, + in which case it's calculated as EndTime - BeginTime. + type: string + endTime: + description: |- + Requested end time. If specified with BeginTime, Duration is calculated. + Can be updated to extend or shorten active leases. + format: date-time + type: string + release: + description: The release flag requests the controller to end the lease + now + type: boolean + selector: + description: The selector for the exporter to be used + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - clientRef + - selector + type: object + status: + description: LeaseStatus defines the observed state of Lease + properties: + beginTime: + description: |- + If the lease has been acquired an exporter name is assigned + and then it can be used, it will be empty while still pending + format: date-time + type: string + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + endTime: + format: date-time + type: string + ended: + type: boolean + exporterRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + priority: + type: integer + spotAccess: + type: boolean + required: + - ended + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/deploy/operator/config/crd/bases/operator.jumpstarter.dev_jumpstarters.yaml b/controller/deploy/operator/config/crd/bases/operator.jumpstarter.dev_jumpstarters.yaml new file mode 100644 index 000000000..c6f7c6d8e --- /dev/null +++ b/controller/deploy/operator/config/crd/bases/operator.jumpstarter.dev_jumpstarters.yaml @@ -0,0 +1,1912 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: jumpstarters.operator.jumpstarter.dev +spec: + group: operator.jumpstarter.dev + names: + kind: Jumpstarter + listKind: JumpstarterList + plural: jumpstarters + singular: jumpstarter + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Jumpstarter is the Schema for the jumpstarters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + JumpstarterSpec defines the desired state of a Jumpstarter deployment. A deployment + can be created in a namespace of the cluster, and that's where all the Jumpstarter + resources and services will reside. + properties: + authentication: + description: |- + Authentication configuration for client and exporter authentication. + Supports multiple authentication methods including internal tokens, Kubernetes tokens, and JWT. + properties: + internal: + description: |- + Internal authentication configuration. + Built-in authenticator that issues tokens for clients and exporters. + This is the simplest authentication method and is enabled by default. + properties: + enabled: + default: true + description: |- + Enable the internal authentication method. + When disabled, clients cannot use internal tokens for authentication. + type: boolean + prefix: + default: 'internal:' + description: |- + Prefix to add to the subject claim of issued tokens. + Helps distinguish internal tokens from other authentication methods. + Example: "internal:" will result in subjects like "internal:user123" + maxLength: 50 + type: string + tokenLifetime: + default: 43800h + description: |- + Token validity duration for issued tokens. + After this duration, tokens expire and must be renewed. + type: string + type: object + jwt: + description: |- + JWT authentication configuration. + Enables authentication using external JWT tokens from OIDC providers. + Supports multiple JWT authenticators for different identity providers. + items: + description: JWTAuthenticator provides the configuration for + a single JWT authenticator. + properties: + claimMappings: + description: claimMappings points claims of a token to be + treated as user attributes. 
+ properties: + extra: + description: |- + extra represents an option for the extra attribute. + expression must produce a string or string array value. + If the value is empty, the extra mapping will not be present. + + hard-coded extra key/value + - key: "foo" + valueExpression: "'bar'" + This will result in an extra attribute - foo: ["bar"] + + hard-coded key, value copying claim value + - key: "foo" + valueExpression: "claims.some_claim" + This will result in an extra attribute - foo: [value of some_claim] + + hard-coded key, value derived from claim value + - key: "admin" + valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""' + This will result in: + - if is_admin claim is present and true, extra attribute - admin: ["true"] + - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added + items: + description: ExtraMapping provides the configuration + for a single extra mapping. + properties: + key: + description: |- + key is a string to use as the extra attribute key. + key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid + subdomain as defined by RFC 1123. All characters trailing the first "/" must + be valid HTTP Path characters as defined by RFC 3986. + key must be lowercase. + Required to be unique. + type: string + valueExpression: + description: |- + valueExpression is a CEL expression to extract extra attribute value. + valueExpression must produce a string or string array value. + "", [], and null values are treated as the extra mapping not being present. + Empty string values contained within a string array are filtered out. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. 
+ + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + required: + - key + - valueExpression + type: object + type: array + groups: + description: |- + groups represents an option for the groups attribute. + The claim's value must be a string or string array claim. + If groups.claim is set, the prefix must be specified (and can be the empty string). + If groups.expression is set, the expression must produce a string or string array value. + "", [], and null values are treated as the group mapping not being present. + properties: + claim: + description: |- + claim is the JWT claim to use. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and prefix. + type: string + prefix: + description: |- + prefix is prepended to claim's value to prevent clashes with existing names. + prefix needs to be set if claim is set and can be the empty string. + Mutually exclusive with expression. + type: string + type: object + uid: + description: |- + uid represents an option for the uid attribute. + Claim must be a singular string claim. + If uid.expression is set, the expression must produce a string value. + properties: + claim: + description: |- + claim is the JWT claim to use. + Either claim or expression must be set. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. 
+ + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim. + type: string + type: object + username: + description: |- + username represents an option for the username attribute. + The claim's value must be a singular string. + Same as the --oidc-username-claim and --oidc-username-prefix flags. + If username.expression is set, the expression must produce a string value. + If username.expression uses 'claims.email', then 'claims.email_verified' must be used in + username.expression or extra[*].valueExpression or claimValidationRules[*].expression. + An example claim validation rule expression that matches the validation automatically + applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true) == true'. By explicitly comparing + the value to true, we let type-checking see the result will be a boolean, and to make sure a non-boolean email_verified + claim will be caught at runtime. + + In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set, + the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly. + For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config. + For prefix: + (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config, + set username.prefix="" + (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "<value of --oidc-issuer-url>#". 
For the same + behavior using authentication config, set username.prefix="<value of issuer.url>#" + (3) --oidc-username-prefix="<value>". For the same behavior using authentication config, set username.prefix="<value>" + properties: + claim: + description: |- + claim is the JWT claim to use. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and prefix. + type: string + prefix: + description: |- + prefix is prepended to claim's value to prevent clashes with existing names. + prefix needs to be set if claim is set and can be the empty string. + Mutually exclusive with expression. + type: string + type: object + required: + - username + type: object + claimValidationRules: + description: claimValidationRules are rules that are applied + to validate token claims to authenticate users. + items: + description: ClaimValidationRule provides the configuration + for a single claim validation rule. + properties: + claim: + description: |- + claim is the name of a required claim. + Same as --oidc-required-claim flag. + Only string claim keys are supported. + Mutually exclusive with expression and message. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must produce a boolean. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. 
+ Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + Must return true for the validation to pass. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and requiredValue. + type: string + message: + description: |- + message customizes the returned error message when expression returns false. + message is a literal string. + Mutually exclusive with claim and requiredValue. + type: string + requiredValue: + description: |- + requiredValue is the value of a required claim. + Same as --oidc-required-claim flag. + Only string claim values are supported. + If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + Mutually exclusive with expression and message. + type: string + type: object + type: array + issuer: + description: issuer contains the basic OIDC provider connection + options. + properties: + audienceMatchPolicy: + description: |- + audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT. + Allowed values are: + 1. "MatchAny" when multiple audiences are specified and + 2. empty (or unset) or "MatchAny" when a single audience is specified. + + - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both). + + - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others). + + For more nuanced audience validation, use claimValidationRules. + example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match. 
+ type: string + audiences: + description: |- + audiences is the set of acceptable audiences the JWT must be issued to. + At least one of the entries must match the "aud" claim in presented JWTs. + Same value as the --oidc-client-id flag (though this field supports an array). + Required to be non-empty. + items: + type: string + type: array + certificateAuthority: + description: |- + certificateAuthority contains PEM-encoded certificate authority certificates + used to validate the connection when fetching discovery information. + If unset, the system verifier is used. + Same value as the content of the file referenced by the --oidc-ca-file flag. + type: string + discoveryURL: + description: |- + discoveryURL, if specified, overrides the URL used to fetch discovery + information instead of using "{url}/.well-known/openid-configuration". + The exact value specified is used, so "/.well-known/openid-configuration" + must be included in discoveryURL if needed. + + The "issuer" field in the fetched discovery information must match the "issuer.url" field + in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT. + This is for scenarios where the well-known and jwks endpoints are hosted at a different + location than the issuer (such as locally in the cluster). + + Example: + A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace' + and discovery information is available at '/.well-known/openid-configuration'. + discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration" + certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate + must be set to 'oidc.oidc-namespace'. + + curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field) + { + issuer: "https://oidc.example.com" (.url field) + } + + discoveryURL must be different from url. + Required to be unique across all JWT authenticators. 
+ Note that egress selection configuration is not used for this network connection. + type: string + url: + description: |- + url points to the issuer URL in a format https://url or https://url/path. + This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + Same value as the --oidc-issuer-url flag. + Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL. + Required to be unique across all JWT authenticators. + Note that egress selection configuration is not used for this network connection. + type: string + required: + - audiences + - url + type: object + userValidationRules: + description: |- + userValidationRules are rules that are applied to final user before completing authentication. + These allow invariants to be applied to incoming identities such as preventing the + use of the system: prefix that is commonly used by Kubernetes components. + The validation rules are logically ANDed together and must all return true for the validation to pass. + items: + description: UserValidationRule provides the configuration + for a single user info validation rule. + properties: + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must return true for the validation to pass. + + CEL expressions have access to the contents of UserInfo, organized into CEL variable: + - 'user' - authentication.k8s.io/v1, Kind=UserInfo object + Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. + API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + message: + description: |- + message customizes the returned error message when rule returns false. + message is a literal string. 
+ type: string + required: + - expression + type: object + type: array + required: + - claimMappings + - issuer + type: object + type: array + k8s: + description: |- + Kubernetes authentication configuration. + Enables authentication using Kubernetes service account tokens. + Useful for integrating with existing Kubernetes RBAC policies. + properties: + enabled: + default: false + description: |- + Enable Kubernetes authentication. + When enabled, clients can authenticate using Kubernetes service account tokens. + type: boolean + type: object + type: object + baseDomain: + description: |- + Base domain used to construct FQDNs for all service endpoints. + This domain will be used to generate the default hostnames for Routes, Ingresses, and certificates. + Example: "example.com" will generate endpoints like "grpc.example.com", "router.example.com" + pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + controller: + default: {} + description: |- + Controller configuration for the main Jumpstarter API and gRPC services. + The controller handles gRPC and REST API requests from clients and exporters. + properties: + authentication: + description: |- + Authentication configuration for client and exporter authentication. + Configures how clients and exporters can authenticate with Jumpstarter. + Supports multiple authentication methods including internal tokens, Kubernetes tokens, and JWT. + properties: + internal: + description: |- + Internal authentication configuration. + Built-in authenticator that issues tokens for clients and exporters. + This is the simplest authentication method and is enabled by default. + properties: + enabled: + default: true + description: |- + Enable the internal authentication method. + When disabled, clients cannot use internal tokens for authentication. + type: boolean + prefix: + default: 'internal:' + description: |- + Prefix to add to the subject claim of issued tokens. 
+ Helps distinguish internal tokens from other authentication methods. + Example: "internal:" will result in subjects like "internal:user123" + maxLength: 50 + type: string + tokenLifetime: + default: 43800h + description: |- + Token validity duration for issued tokens. + After this duration, tokens expire and must be renewed. + type: string + type: object + jwt: + description: |- + JWT authentication configuration. + Enables authentication using external JWT tokens from OIDC providers. + Supports multiple JWT authenticators for different identity providers. + items: + description: JWTAuthenticator provides the configuration + for a single JWT authenticator. + properties: + claimMappings: + description: claimMappings points claims of a token + to be treated as user attributes. + properties: + extra: + description: |- + extra represents an option for the extra attribute. + expression must produce a string or string array value. + If the value is empty, the extra mapping will not be present. + + hard-coded extra key/value + - key: "foo" + valueExpression: "'bar'" + This will result in an extra attribute - foo: ["bar"] + + hard-coded key, value copying claim value + - key: "foo" + valueExpression: "claims.some_claim" + This will result in an extra attribute - foo: [value of some_claim] + + hard-coded key, value derived from claim value + - key: "admin" + valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""' + This will result in: + - if is_admin claim is present and true, extra attribute - admin: ["true"] + - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added + items: + description: ExtraMapping provides the configuration + for a single extra mapping. + properties: + key: + description: |- + key is a string to use as the extra attribute key. + key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid + subdomain as defined by RFC 1123. 
All characters trailing the first "/" must + be valid HTTP Path characters as defined by RFC 3986. + key must be lowercase. + Required to be unique. + type: string + valueExpression: + description: |- + valueExpression is a CEL expression to extract extra attribute value. + valueExpression must produce a string or string array value. + "", [], and null values are treated as the extra mapping not being present. + Empty string values contained within a string array are filtered out. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + required: + - key + - valueExpression + type: object + type: array + groups: + description: |- + groups represents an option for the groups attribute. + The claim's value must be a string or string array claim. + If groups.claim is set, the prefix must be specified (and can be the empty string). + If groups.expression is set, the expression must produce a string or string array value. + "", [], and null values are treated as the group mapping not being present. + properties: + claim: + description: |- + claim is the JWT claim to use. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and prefix. 
+ type: string + prefix: + description: |- + prefix is prepended to claim's value to prevent clashes with existing names. + prefix needs to be set if claim is set and can be the empty string. + Mutually exclusive with expression. + type: string + type: object + uid: + description: |- + uid represents an option for the uid attribute. + Claim must be a singular string claim. + If uid.expression is set, the expression must produce a string value. + properties: + claim: + description: |- + claim is the JWT claim to use. + Either claim or expression must be set. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim. + type: string + type: object + username: + description: |- + username represents an option for the username attribute. + The claim's value must be a singular string. + Same as the --oidc-username-claim and --oidc-username-prefix flags. + If username.expression is set, the expression must produce a string value. + If username.expression uses 'claims.email', then 'claims.email_verified' must be used in + username.expression or extra[*].valueExpression or claimValidationRules[*].expression. + An example claim validation rule expression that matches the validation automatically + applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true) == true'. By explicitly comparing + the value to true, we let type-checking see the result will be a boolean, and to make sure a non-boolean email_verified + claim will be caught at runtime. 
+
+ In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set,
+ the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly.
+ For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config.
+ For prefix:
+ (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config,
+ set username.prefix=""
+ (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "<value of --oidc-issuer-url>#". For the same
+ behavior using authentication config, set username.prefix="<value of issuer.url>#"
+ (3) --oidc-username-prefix="<value>". For the same behavior using authentication config, set username.prefix="<value>"
+ properties:
+ claim:
+ description: |-
+ claim is the JWT claim to use.
+ Mutually exclusive with expression.
+ type: string
+ expression:
+ description: |-
+ expression represents the expression which will be evaluated by CEL.
+
+ CEL expressions have access to the contents of the token claims, organized into CEL variable:
+ - 'claims' is a map of claim names to claim values.
+ For example, a variable named 'sub' can be accessed as 'claims.sub'.
+ Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+
+ Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+
+ Mutually exclusive with claim and prefix.
+ type: string
+ prefix:
+ description: |-
+ prefix is prepended to claim's value to prevent clashes with existing names.
+ prefix needs to be set if claim is set and can be the empty string.
+ Mutually exclusive with expression.
+ type: string
+ type: object
+ required:
+ - username
+ type: object
+ claimValidationRules:
+ description: claimValidationRules are rules that are
+ applied to validate token claims to authenticate users.
+ items: + description: ClaimValidationRule provides the configuration + for a single claim validation rule. + properties: + claim: + description: |- + claim is the name of a required claim. + Same as --oidc-required-claim flag. + Only string claim keys are supported. + Mutually exclusive with expression and message. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must produce a boolean. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + Must return true for the validation to pass. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and requiredValue. + type: string + message: + description: |- + message customizes the returned error message when expression returns false. + message is a literal string. + Mutually exclusive with claim and requiredValue. + type: string + requiredValue: + description: |- + requiredValue is the value of a required claim. + Same as --oidc-required-claim flag. + Only string claim values are supported. + If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + Mutually exclusive with expression and message. + type: string + type: object + type: array + issuer: + description: issuer contains the basic OIDC provider + connection options. + properties: + audienceMatchPolicy: + description: |- + audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT. + Allowed values are: + 1. "MatchAny" when multiple audiences are specified and + 2. empty (or unset) or "MatchAny" when a single audience is specified. 
+ + - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both). + + - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others). + + For more nuanced audience validation, use claimValidationRules. + example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match. + type: string + audiences: + description: |- + audiences is the set of acceptable audiences the JWT must be issued to. + At least one of the entries must match the "aud" claim in presented JWTs. + Same value as the --oidc-client-id flag (though this field supports an array). + Required to be non-empty. + items: + type: string + type: array + certificateAuthority: + description: |- + certificateAuthority contains PEM-encoded certificate authority certificates + used to validate the connection when fetching discovery information. + If unset, the system verifier is used. + Same value as the content of the file referenced by the --oidc-ca-file flag. + type: string + discoveryURL: + description: |- + discoveryURL, if specified, overrides the URL used to fetch discovery + information instead of using "{url}/.well-known/openid-configuration". + The exact value specified is used, so "/.well-known/openid-configuration" + must be included in discoveryURL if needed. + + The "issuer" field in the fetched discovery information must match the "issuer.url" field + in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT. + This is for scenarios where the well-known and jwks endpoints are hosted at a different + location than the issuer (such as locally in the cluster). 
+ + Example: + A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace' + and discovery information is available at '/.well-known/openid-configuration'. + discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration" + certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate + must be set to 'oidc.oidc-namespace'. + + curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field) + { + issuer: "https://oidc.example.com" (.url field) + } + + discoveryURL must be different from url. + Required to be unique across all JWT authenticators. + Note that egress selection configuration is not used for this network connection. + type: string + url: + description: |- + url points to the issuer URL in a format https://url or https://url/path. + This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + Same value as the --oidc-issuer-url flag. + Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL. + Required to be unique across all JWT authenticators. + Note that egress selection configuration is not used for this network connection. + type: string + required: + - audiences + - url + type: object + userValidationRules: + description: |- + userValidationRules are rules that are applied to final user before completing authentication. + These allow invariants to be applied to incoming identities such as preventing the + use of the system: prefix that is commonly used by Kubernetes components. + The validation rules are logically ANDed together and must all return true for the validation to pass. + items: + description: UserValidationRule provides the configuration + for a single user info validation rule. + properties: + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must return true for the validation to pass. 
+ + CEL expressions have access to the contents of UserInfo, organized into CEL variable: + - 'user' - authentication.k8s.io/v1, Kind=UserInfo object + Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. + API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + message: + description: |- + message customizes the returned error message when rule returns false. + message is a literal string. + type: string + required: + - expression + type: object + type: array + required: + - claimMappings + - issuer + type: object + type: array + k8s: + description: |- + Kubernetes authentication configuration. + Enables authentication using Kubernetes service account tokens. + Useful for integrating with existing Kubernetes RBAC policies. + properties: + enabled: + default: false + description: |- + Enable Kubernetes authentication. + When enabled, clients can authenticate using Kubernetes service account tokens. + type: boolean + type: object + type: object + exporterOptions: + description: |- + Exporter options configuration. + Controls how exporters connect and behave when communicating with the controller. + properties: + offlineTimeout: + default: 180s + description: |- + Offline timeout duration for exporters. + After this duration without communication, an exporter is considered offline. + This drives the online/offline status field of exporters, and offline exporters + won't be considered for leases. + type: string + type: object + grpc: + description: |- + gRPC configuration for controller endpoints. + Defines how controller gRPC services are exposed and configured. + properties: + endpoints: + description: |- + List of gRPC endpoints to expose. 
+ Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + based on your cluster setup. Example: Use Route for OpenShift, Ingress for standard Kubernetes. + items: + description: |- + Endpoint defines a single endpoint configuration. + An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. + Multiple methods can be configured simultaneously for the same address. + properties: + address: + description: |- + Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. + When optional, the address is used for certificate generation and DNS resolution. + Supports templating with $(replica) for replica-specific addresses. + Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + pattern: ^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$ + type: string + clusterIP: + description: |- + ClusterIP configuration for internal service access. + Creates a ClusterIP service for this endpoint. + Useful for internal service-to-service communication or when + using a different method to expose the service externally. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ClusterIP service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the ClusterIP service for this endpoint. + When disabled, no ClusterIP service will be created for this endpoint. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the ClusterIP service. + Useful for monitoring, cost allocation, and resource organization. 
+ type: object + type: object + ingress: + description: |- + Ingress configuration for standard Kubernetes clusters. + Creates an Ingress resource for this endpoint. + Requires an ingress controller to be installed. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the Kubernetes Ingress resource. + Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. + type: object + class: + default: default + description: |- + Ingress class name for the Kubernetes Ingress. + Specifies which ingress controller should handle this ingress. + type: string + enabled: + description: |- + Enable the Kubernetes Ingress for this endpoint. + When disabled, no Ingress resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the Kubernetes Ingress resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + loadBalancer: + description: |- + LoadBalancer configuration for cloud environments. + Creates a LoadBalancer service for this endpoint. + Requires cloud provider support for LoadBalancer services. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the LoadBalancer service. + Useful for configuring cloud provider-specific load balancer options. + Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + type: object + enabled: + description: |- + Enable the LoadBalancer service for this endpoint. + When disabled, no LoadBalancer service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the LoadBalancer service. 
+ Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + Port number for the LoadBalancer service. + Must be a valid port number (1-65535). + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodeport: + description: |- + NodePort configuration for direct node access. + Exposes the service on a specific port on each node. + Useful for bare-metal or simple cluster setups. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the NodePort service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the NodePort service for this endpoint. + When disabled, no NodePort service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the NodePort service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + NodePort port number to expose on each node. + Must be in the range 30000-32767 for most Kubernetes clusters. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: |- + Route configuration for OpenShift clusters. + Creates an OpenShift Route resource for this endpoint. + Only applicable in OpenShift environments. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the OpenShift Route resource. + Useful for configuring route-specific behavior and TLS settings. + type: object + enabled: + description: |- + Enable the OpenShift Route for this endpoint. + When disabled, no Route resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. 
+ type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the OpenShift Route resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + type: object + type: array + keepalive: + description: |- + Keepalive configuration for gRPC connections. + Controls connection health checks and idle connection management. + Helps maintain stable connections in load-balanced environments. + properties: + intervalTime: + default: 10s + description: |- + Interval between keepalive pings. + How often to send keepalive pings to check connection health. This is important + to keep TCP gRPC connections alive when traversing load balancers and proxies. + type: string + maxConnectionAge: + description: |- + Maximum age of a connection before it is closed and recreated. + Helps prevent issues with long-lived connections. It defaults to infinity. + type: string + maxConnectionAgeGrace: + description: |- + Grace period for closing connections that exceed MaxConnectionAge. + Allows ongoing RPCs to complete before closing the connection. + type: string + maxConnectionIdle: + description: |- + Maximum time a connection can remain idle before being closed. + It defaults to infinity. + type: string + minTime: + default: 1s + description: |- + Minimum time between keepalives that the connection will accept, under this threshold + the other side will get a GOAWAY signal. + Prevents excessive keepalive traffic on the network. + type: string + permitWithoutStream: + default: true + description: |- + Allow keepalive pings even when there are no active RPC streams. + Useful for detecting connection issues in idle connections. + This is important to keep TCP gRPC connections alive when traversing + load balancers and proxies. + type: boolean + timeout: + default: 180s + description: |- + Timeout for keepalive ping acknowledgment. 
+ If a ping is not acknowledged within this time, the connection is considered broken.
+ The default is high to avoid issues when the network on an exporter is overloaded, e.g.
+ during flashing.
+ type: string
+ type: object
+ tls:
+ description: |-
+ TLS configuration for secure gRPC communication.
+ Requires a Kubernetes secret containing the TLS certificate and private key.
+ If useCertManager is enabled, this secret will be automatically created.
+ See also: spec.useCertManager for automatic certificate management.
+ properties:
+ certSecret:
+ description: |-
+ Name of the Kubernetes secret containing the TLS certificate and private key.
+ The secret must contain 'tls.crt' and 'tls.key' keys.
+ If useCertManager is enabled, this secret will be automatically created.
+ pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$
+ type: string
+ type: object
+ type: object
+ image:
+ default: quay.io/jumpstarter-dev/jumpstarter-controller:latest
+ description: |-
+ Container image for the controller pods in 'registry/repository/image:tag' format.
+ If not specified, defaults to the latest stable version of the Jumpstarter controller.
+ type: string
+ imagePullPolicy:
+ default: IfNotPresent
+ description: |-
+ Image pull policy for the controller container.
+ Controls when the container image should be pulled from the registry.
+ enum:
+ - Always
+ - IfNotPresent
+ - Never
+ type: string
+ replicas:
+ default: 2
+ description: |-
+ Number of controller replicas to run.
+ Must be a positive integer. Minimum recommended value is 2 for high availability.
+ format: int32
+ minimum: 1
+ type: integer
+ resources:
+ description: |-
+ Resource requirements for controller pods.
+ Defines CPU and memory requests and limits for each controller pod.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+ + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restApi: + description: |- + REST API configuration for HTTP-based clients. + Enables non-gRPC clients to interact with Jumpstarter for listing leases, + managing exporters, and creating new leases. Use this when you need HTTP/JSON access. 
+ properties: + endpoints: + description: |- + List of REST API endpoints to expose. + Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + based on your cluster setup. + items: + description: |- + Endpoint defines a single endpoint configuration. + An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. + Multiple methods can be configured simultaneously for the same address. + properties: + address: + description: |- + Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. + When optional, the address is used for certificate generation and DNS resolution. + Supports templating with $(replica) for replica-specific addresses. + Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + pattern: ^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$ + type: string + clusterIP: + description: |- + ClusterIP configuration for internal service access. + Creates a ClusterIP service for this endpoint. + Useful for internal service-to-service communication or when + using a different method to expose the service externally. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ClusterIP service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the ClusterIP service for this endpoint. + When disabled, no ClusterIP service will be created for this endpoint. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the ClusterIP service. + Useful for monitoring, cost allocation, and resource organization. 
+ type: object + type: object + ingress: + description: |- + Ingress configuration for standard Kubernetes clusters. + Creates an Ingress resource for this endpoint. + Requires an ingress controller to be installed. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the Kubernetes Ingress resource. + Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. + type: object + class: + default: default + description: |- + Ingress class name for the Kubernetes Ingress. + Specifies which ingress controller should handle this ingress. + type: string + enabled: + description: |- + Enable the Kubernetes Ingress for this endpoint. + When disabled, no Ingress resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the Kubernetes Ingress resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + loadBalancer: + description: |- + LoadBalancer configuration for cloud environments. + Creates a LoadBalancer service for this endpoint. + Requires cloud provider support for LoadBalancer services. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the LoadBalancer service. + Useful for configuring cloud provider-specific load balancer options. + Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + type: object + enabled: + description: |- + Enable the LoadBalancer service for this endpoint. + When disabled, no LoadBalancer service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the LoadBalancer service. 
+ Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + Port number for the LoadBalancer service. + Must be a valid port number (1-65535). + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodeport: + description: |- + NodePort configuration for direct node access. + Exposes the service on a specific port on each node. + Useful for bare-metal or simple cluster setups. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the NodePort service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the NodePort service for this endpoint. + When disabled, no NodePort service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the NodePort service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + NodePort port number to expose on each node. + Must be in the range 30000-32767 for most Kubernetes clusters. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: |- + Route configuration for OpenShift clusters. + Creates an OpenShift Route resource for this endpoint. + Only applicable in OpenShift environments. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the OpenShift Route resource. + Useful for configuring route-specific behavior and TLS settings. + type: object + enabled: + description: |- + Enable the OpenShift Route for this endpoint. + When disabled, no Route resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. 
+ type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the OpenShift Route resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + type: object + type: array + tls: + description: |- + TLS configuration for secure HTTP communication. + Requires a Kubernetes secret containing the TLS certificate and private key. + properties: + certSecret: + description: |- + Name of the Kubernetes secret containing the TLS certificate and private key. + The secret must contain 'tls.crt' and 'tls.key' keys. + If useCertManager is enabled, this secret will be automatically created. + pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + type: object + type: object + type: object + routers: + default: {} + description: |- + Router configuration for the Jumpstarter router service. + Routers handle gRPC traffic routing and load balancing. + properties: + grpc: + description: |- + gRPC configuration for router endpoints. + Defines how router gRPC services are exposed and configured. + properties: + endpoints: + description: |- + List of gRPC endpoints to expose. + Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + based on your cluster setup. Example: Use Route for OpenShift, Ingress for standard Kubernetes. + items: + description: |- + Endpoint defines a single endpoint configuration. + An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. + Multiple methods can be configured simultaneously for the same address. + properties: + address: + description: |- + Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. + When optional, the address is used for certificate generation and DNS resolution. 
+ Supports templating with $(replica) for replica-specific addresses. + Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + pattern: ^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$ + type: string + clusterIP: + description: |- + ClusterIP configuration for internal service access. + Creates a ClusterIP service for this endpoint. + Useful for internal service-to-service communication or when + using a different method to expose the service externally. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ClusterIP service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the ClusterIP service for this endpoint. + When disabled, no ClusterIP service will be created for this endpoint. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the ClusterIP service. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + ingress: + description: |- + Ingress configuration for standard Kubernetes clusters. + Creates an Ingress resource for this endpoint. + Requires an ingress controller to be installed. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the Kubernetes Ingress resource. + Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. + type: object + class: + default: default + description: |- + Ingress class name for the Kubernetes Ingress. + Specifies which ingress controller should handle this ingress. + type: string + enabled: + description: |- + Enable the Kubernetes Ingress for this endpoint. + When disabled, no Ingress resource will be created for this endpoint. 
+ When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the Kubernetes Ingress resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + loadBalancer: + description: |- + LoadBalancer configuration for cloud environments. + Creates a LoadBalancer service for this endpoint. + Requires cloud provider support for LoadBalancer services. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the LoadBalancer service. + Useful for configuring cloud provider-specific load balancer options. + Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + type: object + enabled: + description: |- + Enable the LoadBalancer service for this endpoint. + When disabled, no LoadBalancer service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the LoadBalancer service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + Port number for the LoadBalancer service. + Must be a valid port number (1-65535). + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodeport: + description: |- + NodePort configuration for direct node access. + Exposes the service on a specific port on each node. + Useful for bare-metal or simple cluster setups. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the NodePort service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the NodePort service for this endpoint. 
+ When disabled, no NodePort service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the NodePort service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + NodePort port number to expose on each node. + Must be in the range 30000-32767 for most Kubernetes clusters. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: |- + Route configuration for OpenShift clusters. + Creates an OpenShift Route resource for this endpoint. + Only applicable in OpenShift environments. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the OpenShift Route resource. + Useful for configuring route-specific behavior and TLS settings. + type: object + enabled: + description: |- + Enable the OpenShift Route for this endpoint. + When disabled, no Route resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the OpenShift Route resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + type: object + type: array + keepalive: + description: |- + Keepalive configuration for gRPC connections. + Controls connection health checks and idle connection management. + Helps maintain stable connections in load-balanced environments. + properties: + intervalTime: + default: 10s + description: |- + Interval between keepalive pings. + How often to send keepalive pings to check connection health. This is important + to keep TCP gRPC connections alive when traversing load balancers and proxies. 
+ type: string
+ maxConnectionAge:
+ description: |-
+ Maximum age of a connection before it is closed and recreated.
+ Helps prevent issues with long-lived connections. It defaults to infinity.
+ type: string
+ maxConnectionAgeGrace:
+ description: |-
+ Grace period for closing connections that exceed MaxConnectionAge.
+ Allows ongoing RPCs to complete before closing the connection.
+ type: string
+ maxConnectionIdle:
+ description: |-
+ Maximum time a connection can remain idle before being closed.
+ It defaults to infinity.
+ type: string
+ minTime:
+ default: 1s
+ description: |-
+ Minimum time between keepalives that the connection will accept, under this threshold
+ the other side will get a GOAWAY signal.
+ Prevents excessive keepalive traffic on the network.
+ type: string
+ permitWithoutStream:
+ default: true
+ description: |-
+ Allow keepalive pings even when there are no active RPC streams.
+ Useful for detecting connection issues in idle connections.
+ This is important to keep TCP gRPC connections alive when traversing
+ load balancers and proxies.
+ type: boolean
+ timeout:
+ default: 180s
+ description: |-
+ Timeout for keepalive ping acknowledgment.
+ If a ping is not acknowledged within this time, the connection is considered broken.
+ The default is high to avoid issues when the network on an exporter is overloaded, e.g.
+ during flashing.
+ type: string
+ type: object
+ tls:
+ description: |-
+ TLS configuration for secure gRPC communication.
+ Requires a Kubernetes secret containing the TLS certificate and private key.
+ If useCertManager is enabled, this secret will be automatically created.
+ See also: spec.useCertManager for automatic certificate management.
+ properties:
+ certSecret:
+ description: |-
+ Name of the Kubernetes secret containing the TLS certificate and private key.
+ The secret must contain 'tls.crt' and 'tls.key' keys.
+ If useCertManager is enabled, this secret will be automatically created.
+ pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + type: object + type: object + image: + default: quay.io/jumpstarter-dev/jumpstarter-controller:latest + description: |- + Container image for the router pods in 'registry/repository/image:tag' format. + If not specified, defaults to the latest stable version of the Jumpstarter router. + type: string + imagePullPolicy: + default: IfNotPresent + description: |- + Image pull policy for the router container. + Controls when the container image should be pulled from the registry. + enum: + - Always + - IfNotPresent + - Never + type: string + replicas: + default: 3 + description: |- + Number of router replicas to run. + Must be a positive integer. Minimum recommended value is 3 for high availability. + format: int32 + minimum: 1 + type: integer + resources: + description: |- + Resource requirements for router pods. + Defines CPU and memory requests and limits for each router pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + topologySpreadConstraints: + description: |- + Topology spread constraints for router pod distribution. + Ensures router pods are distributed evenly across nodes and zones. + Useful for high availability and fault tolerance. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. 
+ + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. 
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. 
If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + useCertManager: + default: true + description: |- + Enable automatic TLS certificate management using cert-manager. + When enabled, jumpstarter will interact with cert-manager to automatically provision + and renew TLS certificates for all endpoints. Requires cert-manager to be installed in the cluster. + type: boolean + type: object + status: + description: |- + JumpstarterStatus defines the observed state of Jumpstarter. + This field is currently empty but can be extended to include status information + such as deployment status, endpoint URLs, and health information. 
+ type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/deploy/operator/config/crd/kustomization.yaml b/controller/deploy/operator/config/crd/kustomization.yaml new file mode 100644 index 000000000..70cefba92 --- /dev/null +++ b/controller/deploy/operator/config/crd/kustomization.yaml @@ -0,0 +1,21 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/operator.jumpstarter.dev_jumpstarters.yaml +- bases/jumpstarter.dev_clients.yaml +- bases/jumpstarter.dev_exporters.yaml +- bases/jumpstarter.dev_leases.yaml +- bases/jumpstarter.dev_exporteraccesspolicies.yaml + +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [WEBHOOK] To enable webhook, uncomment the following section +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+#configurations: +#- kustomizeconfig.yaml diff --git a/controller/deploy/operator/config/crd/kustomizeconfig.yaml b/controller/deploy/operator/config/crd/kustomizeconfig.yaml new file mode 100644 index 000000000..ec5c150a9 --- /dev/null +++ b/controller/deploy/operator/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/controller/deploy/operator/config/default/cert_metrics_manager_patch.yaml b/controller/deploy/operator/config/default/cert_metrics_manager_patch.yaml new file mode 100644 index 000000000..d97501553 --- /dev/null +++ b/controller/deploy/operator/config/default/cert_metrics_manager_patch.yaml @@ -0,0 +1,30 @@ +# This patch adds the args, volumes, and ports to allow the manager to use the metrics-server certs. 
+ +# Add the volumeMount for the metrics-server certs +- op: add + path: /spec/template/spec/containers/0/volumeMounts/- + value: + mountPath: /tmp/k8s-metrics-server/metrics-certs + name: metrics-certs + readOnly: true + +# Add the --metrics-cert-path argument for the metrics server +- op: add + path: /spec/template/spec/containers/0/args/- + value: --metrics-cert-path=/tmp/k8s-metrics-server/metrics-certs + +# Add the metrics-server certs volume configuration +- op: add + path: /spec/template/spec/volumes/- + value: + name: metrics-certs + secret: + secretName: metrics-server-cert + optional: false + items: + - key: ca.crt + path: ca.crt + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key diff --git a/controller/deploy/operator/config/default/kustomization.yaml b/controller/deploy/operator/config/default/kustomization.yaml new file mode 100644 index 000000000..311f3cb57 --- /dev/null +++ b/controller/deploy/operator/config/default/kustomization.yaml @@ -0,0 +1,234 @@ +# Adds namespace to all resources. +namespace: jumpstarter-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: jumpstarter-operator- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus +# [METRICS] Expose the controller manager metrics service. 
+- metrics_service.yaml +# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. +# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. +# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will +# be able to communicate with the Webhook Server. +#- ../network-policy + +# Uncomment the patches line if you enable Metrics +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +# Uncomment the patches line if you enable Metrics and CertManager +# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line. +# This patch will protect the metrics with certManager self-signed certs. +#- path: cert_metrics_manager_patch.yaml +# target: +# kind: Deployment + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- path: manager_webhook_patch.yaml +# target: +# kind: Deployment + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Uncomment the following block to enable certificates for metrics +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.name +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' 
+# index: 0 +# create: true +# - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 0 +# create: true +# +# - source: +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.namespace +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true +# - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have any webhook +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # Name of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # Namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' 
+# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. 
+# +kubebuilder:scaffold:crdkustomizecainjectionns +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. +# +kubebuilder:scaffold:crdkustomizecainjectionname diff --git a/controller/deploy/operator/config/default/manager_metrics_patch.yaml b/controller/deploy/operator/config/default/manager_metrics_patch.yaml new file mode 100644 index 000000000..2aaef6536 --- /dev/null +++ b/controller/deploy/operator/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/controller/deploy/operator/config/default/metrics_service.yaml b/controller/deploy/operator/config/default/metrics_service.yaml new file mode 100644 index 000000000..f18cc3a4a --- /dev/null +++ b/controller/deploy/operator/config/default/metrics_service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-operator diff --git a/controller/deploy/operator/config/manager/kustomization.yaml b/controller/deploy/operator/config/manager/kustomization.yaml new file mode 100644 index 000000000..9c94df038 --- /dev/null +++ b/controller/deploy/operator/config/manager/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: quay.io/jumpstarter-dev/jumpstarter-operator + 
newTag: latest diff --git a/controller/deploy/operator/config/manager/manager.yaml b/controller/deploy/operator/config/manager/manager.yaml new file mode 100644 index 000000000..36c287363 --- /dev/null +++ b/controller/deploy/operator/config/manager/manager.yaml @@ -0,0 +1,99 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-operator + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-operator + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + # Projects are configured by default to adhere to the "restricted" Pod Security Standards. + # This ensures that deployments meet the highest security requirements for Kubernetes. 
+ # For more details, see: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + - --health-probe-bind-address=:8081 + image: quay.io/jumpstarter-dev/jumpstarter-operator:latest + imagePullPolicy: IfNotPresent + name: manager + ports: [] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 10m + memory: 256Mi + volumeMounts: [] + volumes: [] + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/controller/deploy/operator/config/manifests/bases/jumpstarter-operator.clusterserviceversion.yaml b/controller/deploy/operator/config/manifests/bases/jumpstarter-operator.clusterserviceversion.yaml new file mode 100644 index 000000000..8ac483465 --- /dev/null +++ b/controller/deploy/operator/config/manifests/bases/jumpstarter-operator.clusterserviceversion.yaml @@ -0,0 +1,72 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "operator.jumpstarter.dev/v1alpha1", + "kind": "Jumpstarter", + "metadata": { + "name": "jumpstarter", + "namespace": "jumpstarter" + }, + "spec": { + "baseDomain": "jumpstarter.example.com" + } + } + ] + capabilities: Basic Install + name: jumpstarter-operator.v0.0.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: 
Jumpstarter is the Schema for the jumpstarters API. + displayName: Jumpstarter + kind: Jumpstarter + name: jumpstarters.operator.jumpstarter.dev + version: v1alpha1 + description: Jumpstarter is a cloud-native framework for Hardware-in-the-Loop (HIL) + automation that bridges the gap between embedded development workflows and real-world + deployment environments. This operator installs and manages the Jumpstarter Controller, + which acts as the central brain for your testing infrastructure. It orchestrates + secure, shared access to physical hardware and virtual devices (represented as + "exporters") directly from your Kubernetes or OpenShift cluster. + displayName: Jumpstarter Operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + deployments: null + strategy: "" + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - hil + - hardware-in-the-loop + - hardware + - device + - embedded + - testing + - framework + links: + - name: Jumpstarter Operator + url: https://jumpstarter.dev/main/getting-started/installation/service/index.html + maintainers: + - email: majopela@redhat.com + name: Miguel Angel Ajo + maturity: alpha + provider: + name: The Jumpstarter Community + url: https://jumpstarter.dev + version: 0.0.0 diff --git a/controller/deploy/operator/config/manifests/kustomization.yaml b/controller/deploy/operator/config/manifests/kustomization.yaml new file mode 100644 index 000000000..a87dbd9f3 --- /dev/null +++ b/controller/deploy/operator/config/manifests/kustomization.yaml @@ -0,0 +1,28 @@ +# These resources constitute the fully configured set of manifests +# used to generate the 'manifests/' directory in a bundle. 
+resources: +- bases/jumpstarter-operator.clusterserviceversion.yaml +- ../default +- ../samples +- ../scorecard + +# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. +# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. +# These patches remove the unnecessary "cert" volume and its manager container volumeMount. +#patches: +#- target: +# group: apps +# version: v1 +# kind: Deployment +# name: controller-manager +# namespace: system +# patch: |- +# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. +# - op: remove + +# path: /spec/template/spec/containers/0/volumeMounts/0 +# # Remove the "cert" volume, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing volumes in the manager's Deployment. +# - op: remove +# path: /spec/template/spec/volumes/0 diff --git a/controller/deploy/operator/config/network-policy/allow-metrics-traffic.yaml b/controller/deploy/operator/config/network-policy/allow-metrics-traffic.yaml new file mode 100644 index 000000000..a98e1f80d --- /dev/null +++ b/controller/deploy/operator/config/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,27 @@ +# This NetworkPolicy allows ingress traffic +# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those +# namespaces are able to gather data from the metrics endpoint. 
+apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: allow-metrics-traffic + namespace: system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-operator + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP diff --git a/controller/deploy/operator/config/network-policy/kustomization.yaml b/controller/deploy/operator/config/network-policy/kustomization.yaml new file mode 100644 index 000000000..ec0fb5e57 --- /dev/null +++ b/controller/deploy/operator/config/network-policy/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- allow-metrics-traffic.yaml diff --git a/controller/deploy/operator/config/prometheus/kustomization.yaml b/controller/deploy/operator/config/prometheus/kustomization.yaml new file mode 100644 index 000000000..fdc5481b1 --- /dev/null +++ b/controller/deploy/operator/config/prometheus/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- monitor.yaml + +# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus +# to securely reference certificates created and managed by cert-manager. +# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml +# to mount the "metrics-server-cert" secret in the Manager Deployment. 
+#patches: +# - path: monitor_tls_patch.yaml +# target: +# kind: ServiceMonitor diff --git a/controller/deploy/operator/config/prometheus/monitor.yaml b/controller/deploy/operator/config/prometheus/monitor.yaml new file mode 100644 index 000000000..55e9901c0 --- /dev/null +++ b/controller/deploy/operator/config/prometheus/monitor.yaml @@ -0,0 +1,27 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https # Ensure this is the name of the port that exposes HTTPS metrics + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification, exposing the system to potential man-in-the-middle attacks. + # For production environments, it is recommended to use cert-manager for automatic TLS certificate management. + # To apply this configuration, enable cert-manager and use the patch located at config/prometheus/monitor_tls_patch.yaml, + # which securely references the certificate from the 'metrics-server-cert' secret.
+ insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: jumpstarter-operator diff --git a/controller/deploy/operator/config/prometheus/monitor_tls_patch.yaml b/controller/deploy/operator/config/prometheus/monitor_tls_patch.yaml new file mode 100644 index 000000000..5bf84ce0d --- /dev/null +++ b/controller/deploy/operator/config/prometheus/monitor_tls_patch.yaml @@ -0,0 +1,19 @@ +# Patch for Prometheus ServiceMonitor to enable secure TLS configuration +# using certificates managed by cert-manager +- op: replace + path: /spec/endpoints/0/tlsConfig + value: + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize + serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc + insecureSkipVerify: false + ca: + secret: + name: metrics-server-cert + key: ca.crt + cert: + secret: + name: metrics-server-cert + key: tls.crt + keySecret: + name: metrics-server-cert + key: tls.key diff --git a/controller/deploy/operator/config/rbac/jumpstarter_admin_role.yaml b/controller/deploy/operator/config/rbac/jumpstarter_admin_role.yaml new file mode 100644 index 000000000..6b5e2314b --- /dev/null +++ b/controller/deploy/operator/config/rbac/jumpstarter_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project jumpstarter-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over operator.jumpstarter.dev. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: jumpstarter-admin-role +rules: +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - '*' +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get diff --git a/controller/deploy/operator/config/rbac/jumpstarter_editor_role.yaml b/controller/deploy/operator/config/rbac/jumpstarter_editor_role.yaml new file mode 100644 index 000000000..2fc0d8d9c --- /dev/null +++ b/controller/deploy/operator/config/rbac/jumpstarter_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project jumpstarter-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the operator.jumpstarter.dev. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: jumpstarter-editor-role +rules: +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get diff --git a/controller/deploy/operator/config/rbac/jumpstarter_viewer_role.yaml b/controller/deploy/operator/config/rbac/jumpstarter_viewer_role.yaml new file mode 100644 index 000000000..dd33715e7 --- /dev/null +++ b/controller/deploy/operator/config/rbac/jumpstarter_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project jumpstarter-operator itself. 
+# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to operator.jumpstarter.dev resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: jumpstarter-viewer-role +rules: +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - get + - list + - watch +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get diff --git a/controller/deploy/operator/config/rbac/kustomization.yaml b/controller/deploy/operator/config/rbac/kustomization.yaml new file mode 100644 index 000000000..dcc20977d --- /dev/null +++ b/controller/deploy/operator/config/rbac/kustomization.yaml @@ -0,0 +1,28 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. 
+# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml +# For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the jumpstarter-operator itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. +- jumpstarter_admin_role.yaml +- jumpstarter_editor_role.yaml +- jumpstarter_viewer_role.yaml + diff --git a/controller/deploy/operator/config/rbac/leader_election_role.yaml b/controller/deploy/operator/config/rbac/leader_election_role.yaml new file mode 100644 index 000000000..8ab05938b --- /dev/null +++ b/controller/deploy/operator/config/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/controller/deploy/operator/config/rbac/leader_election_role_binding.yaml b/controller/deploy/operator/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 000000000..ab0ec5782 --- /dev/null +++ b/controller/deploy/operator/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: 
Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/controller/deploy/operator/config/rbac/metrics_auth_role.yaml b/controller/deploy/operator/config/rbac/metrics_auth_role.yaml new file mode 100644 index 000000000..32d2e4ec6 --- /dev/null +++ b/controller/deploy/operator/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/controller/deploy/operator/config/rbac/metrics_auth_role_binding.yaml b/controller/deploy/operator/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 000000000..e775d67ff --- /dev/null +++ b/controller/deploy/operator/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/controller/deploy/operator/config/rbac/metrics_reader_role.yaml b/controller/deploy/operator/config/rbac/metrics_reader_role.yaml new file mode 100644 index 000000000..ecb2e018e --- /dev/null +++ b/controller/deploy/operator/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/controller/deploy/operator/config/rbac/role.yaml b/controller/deploy/operator/config/rbac/role.yaml new file mode 100644 index 000000000..2eb21cdeb 
--- /dev/null +++ b/controller/deploy/operator/config/rbac/role.yaml @@ -0,0 +1,211 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - services/status + verbs: + - get + - patch + - update +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments/status + verbs: + - get + - patch + - update +- apiGroups: + - config.openshift.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - clients + - exporteraccesspolicies + - exporters + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - clients/finalizers + - exporteraccesspolicies/finalizers + - exporters/finalizers + - leases/finalizers + verbs: + - update +- apiGroups: + - jumpstarter.dev + resources: + - clients/status + - exporteraccesspolicies/status + - exporters/status + - leases/status + verbs: + - get + - patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - get + - patch + - update +- apiGroups: + - operator.jumpstarter.dev + 
resources: + - jumpstarters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/finalizers + verbs: + - update +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get + - patch + - update +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - route.openshift.io + resources: + - routes/custom-host + verbs: + - create + - get + - patch + - update +- apiGroups: + - route.openshift.io + resources: + - routes/status + verbs: + - get + - patch + - update diff --git a/controller/deploy/operator/config/rbac/role_binding.yaml b/controller/deploy/operator/config/rbac/role_binding.yaml new file mode 100644 index 000000000..7575bd4bd --- /dev/null +++ b/controller/deploy/operator/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/controller/deploy/operator/config/rbac/service_account.yaml b/controller/deploy/operator/config/rbac/service_account.yaml new file mode 100644 index 000000000..c19fcdace --- /dev/null +++ b/controller/deploy/operator/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git 
a/controller/deploy/operator/config/samples/kustomization.yaml b/controller/deploy/operator/config/samples/kustomization.yaml new file mode 100644 index 000000000..3704fe7a6 --- /dev/null +++ b/controller/deploy/operator/config/samples/kustomization.yaml @@ -0,0 +1,4 @@ +## Append samples of your project ## +resources: +- operator_v1alpha1_jumpstarter.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/controller/deploy/operator/config/samples/operator_v1alpha1_jumpstarter.yaml b/controller/deploy/operator/config/samples/operator_v1alpha1_jumpstarter.yaml new file mode 100644 index 000000000..af24fbf0a --- /dev/null +++ b/controller/deploy/operator/config/samples/operator_v1alpha1_jumpstarter.yaml @@ -0,0 +1,9 @@ +apiVersion: operator.jumpstarter.dev/v1alpha1 +kind: Jumpstarter +metadata: + labels: + app.kubernetes.io/name: jumpstarter-operator + app.kubernetes.io/managed-by: kustomize + name: jumpstarter-sample +spec: + # TODO(user): Add fields here diff --git a/controller/deploy/operator/config/scorecard/bases/config.yaml b/controller/deploy/operator/config/scorecard/bases/config.yaml new file mode 100644 index 000000000..c77047841 --- /dev/null +++ b/controller/deploy/operator/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git a/controller/deploy/operator/config/scorecard/kustomization.yaml b/controller/deploy/operator/config/scorecard/kustomization.yaml new file mode 100644 index 000000000..54e8aa507 --- /dev/null +++ b/controller/deploy/operator/config/scorecard/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +- bases/config.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +patches: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + kind: Configuration + name: config + version: v1alpha3 +- path: patches/olm.config.yaml + target: + group: 
scorecard.operatorframework.io + kind: Configuration + name: config + version: v1alpha3 +# +kubebuilder:scaffold:patches diff --git a/controller/deploy/operator/config/scorecard/patches/basic.config.yaml b/controller/deploy/operator/config/scorecard/patches/basic.config.yaml new file mode 100644 index 000000000..8237b70d8 --- /dev/null +++ b/controller/deploy/operator/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: basic + test: basic-check-spec-test diff --git a/controller/deploy/operator/config/scorecard/patches/olm.config.yaml b/controller/deploy/operator/config/scorecard/patches/olm.config.yaml new file mode 100644 index 000000000..416660a77 --- /dev/null +++ b/controller/deploy/operator/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: 
quay.io/operator-framework/scorecard-test:v1.41.1 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/controller/deploy/operator/dist/install.yaml b/controller/deploy/operator/dist/install.yaml new file mode 100644 index 000000000..818b075bd --- /dev/null +++ b/controller/deploy/operator/dist/install.yaml @@ -0,0 +1,3045 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager + name: jumpstarter-operator-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: clients.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Client + listKind: ClientList + plural: clients + singular: client + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Client is the Schema for the identities API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClientSpec defines the desired state of Identity + properties: + username: + type: string + type: object + status: + description: ClientStatus defines the observed state of Identity + properties: + credential: + description: Status field for the clients + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + endpoint: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: exporteraccesspolicies.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: ExporterAccessPolicy + listKind: ExporterAccessPolicyList + plural: exporteraccesspolicies + singular: exporteraccesspolicy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ExporterAccessPolicy is the Schema for the exporteraccesspolicies + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExporterAccessPolicySpec defines the desired state of ExporterAccessPolicy. + properties: + exporterSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + policies: + items: + properties: + from: + items: + properties: + clientSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + maximumDuration: + type: string + priority: + type: integer + spotAccess: + type: boolean + type: object + type: array + type: object + status: + description: ExporterAccessPolicyStatus defines the observed state of + ExporterAccessPolicy. + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: exporters.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Exporter + listKind: ExporterList + plural: exporters + singular: exporter + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Exporter is the Schema for the exporters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExporterSpec defines the desired state of Exporter + properties: + username: + type: string + type: object + status: + description: ExporterStatus defines the observed state of Exporter + properties: + conditions: + description: Exporter status fields + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + credential: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + devices: + items: + properties: + labels: + additionalProperties: + type: string + type: object + parent_uuid: + type: string + uuid: + type: string + type: object + type: array + endpoint: + type: string + lastSeen: + format: date-time + type: string + leaseRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: jumpstarters.operator.jumpstarter.dev +spec: + group: operator.jumpstarter.dev + names: + kind: Jumpstarter + listKind: JumpstarterList + plural: jumpstarters + singular: jumpstarter + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Jumpstarter is the Schema for the jumpstarters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + JumpstarterSpec defines the desired state of a Jumpstarter deployment. A deployment + can be created in a namespace of the cluster, and that's where all the Jumpstarter + resources and services will reside. + properties: + authentication: + description: |- + Authentication configuration for client and exporter authentication. + Supports multiple authentication methods including internal tokens, Kubernetes tokens, and JWT. + properties: + internal: + description: |- + Internal authentication configuration. + Built-in authenticator that issues tokens for clients and exporters. + This is the simplest authentication method and is enabled by default. + properties: + enabled: + default: true + description: |- + Enable the internal authentication method. + When disabled, clients cannot use internal tokens for authentication. + type: boolean + prefix: + default: 'internal:' + description: |- + Prefix to add to the subject claim of issued tokens. + Helps distinguish internal tokens from other authentication methods. + Example: "internal:" will result in subjects like "internal:user123" + maxLength: 50 + type: string + tokenLifetime: + default: 43800h + description: |- + Token validity duration for issued tokens. + After this duration, tokens expire and must be renewed. 
+ type: string + type: object + jwt: + description: |- + JWT authentication configuration. + Enables authentication using external JWT tokens from OIDC providers. + Supports multiple JWT authenticators for different identity providers. + items: + description: JWTAuthenticator provides the configuration for + a single JWT authenticator. + properties: + claimMappings: + description: claimMappings points claims of a token to be + treated as user attributes. + properties: + extra: + description: |- + extra represents an option for the extra attribute. + expression must produce a string or string array value. + If the value is empty, the extra mapping will not be present. + + hard-coded extra key/value + - key: "foo" + valueExpression: "'bar'" + This will result in an extra attribute - foo: ["bar"] + + hard-coded key, value copying claim value + - key: "foo" + valueExpression: "claims.some_claim" + This will result in an extra attribute - foo: [value of some_claim] + + hard-coded key, value derived from claim value + - key: "admin" + valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""' + This will result in: + - if is_admin claim is present and true, extra attribute - admin: ["true"] + - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added + items: + description: ExtraMapping provides the configuration + for a single extra mapping. + properties: + key: + description: |- + key is a string to use as the extra attribute key. + key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid + subdomain as defined by RFC 1123. All characters trailing the first "/" must + be valid HTTP Path characters as defined by RFC 3986. + key must be lowercase. + Required to be unique. + type: string + valueExpression: + description: |- + valueExpression is a CEL expression to extract extra attribute value. + valueExpression must produce a string or string array value. 
+ "", [], and null values are treated as the extra mapping not being present. + Empty string values contained within a string array are filtered out. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + required: + - key + - valueExpression + type: object + type: array + groups: + description: |- + groups represents an option for the groups attribute. + The claim's value must be a string or string array claim. + If groups.claim is set, the prefix must be specified (and can be the empty string). + If groups.expression is set, the expression must produce a string or string array value. + "", [], and null values are treated as the group mapping not being present. + properties: + claim: + description: |- + claim is the JWT claim to use. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and prefix. + type: string + prefix: + description: |- + prefix is prepended to claim's value to prevent clashes with existing names. + prefix needs to be set if claim is set and can be the empty string. + Mutually exclusive with expression. + type: string + type: object + uid: + description: |- + uid represents an option for the uid attribute. 
+ Claim must be a singular string claim. + If uid.expression is set, the expression must produce a string value. + properties: + claim: + description: |- + claim is the JWT claim to use. + Either claim or expression must be set. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim. + type: string + type: object + username: + description: |- + username represents an option for the username attribute. + The claim's value must be a singular string. + Same as the --oidc-username-claim and --oidc-username-prefix flags. + If username.expression is set, the expression must produce a string value. + If username.expression uses 'claims.email', then 'claims.email_verified' must be used in + username.expression or extra[*].valueExpression or claimValidationRules[*].expression. + An example claim validation rule expression that matches the validation automatically + applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true) == true'. By explicitly comparing + the value to true, we let type-checking see the result will be a boolean, and to make sure a non-boolean email_verified + claim will be caught at runtime. + + In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set, + the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly. 
+ For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config.
+ For prefix:
+ (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config,
+ set username.prefix=""
+ (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "<value of --oidc-issuer-url>#". For the same
+ behavior using authentication config, set username.prefix="<value of --oidc-issuer-url>#"
+ (3) --oidc-username-prefix="<value>". For the same behavior using authentication config, set username.prefix="<value>"
+ properties:
+ claim:
+ description: |-
+ claim is the JWT claim to use.
+ Mutually exclusive with expression.
+ type: string
+ expression:
+ description: |-
+ expression represents the expression which will be evaluated by CEL.
+
+ CEL expressions have access to the contents of the token claims, organized into CEL variable:
+ - 'claims' is a map of claim names to claim values.
+ For example, a variable named 'sub' can be accessed as 'claims.sub'.
+ Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+
+ Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+
+ Mutually exclusive with claim and prefix.
+ type: string
+ prefix:
+ description: |-
+ prefix is prepended to claim's value to prevent clashes with existing names.
+ prefix needs to be set if claim is set and can be the empty string.
+ Mutually exclusive with expression.
+ type: string
+ type: object
+ required:
+ - username
+ type: object
+ claimValidationRules:
+ description: claimValidationRules are rules that are applied
+ to validate token claims to authenticate users.
+ items:
+ description: ClaimValidationRule provides the configuration
+ for a single claim validation rule.
+ properties:
+ claim:
+ description: |-
+ claim is the name of a required claim.
+ Same as --oidc-required-claim flag.
+ Only string claim keys are supported.
+ Mutually exclusive with expression and message.
+ type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must produce a boolean. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + Must return true for the validation to pass. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and requiredValue. + type: string + message: + description: |- + message customizes the returned error message when expression returns false. + message is a literal string. + Mutually exclusive with claim and requiredValue. + type: string + requiredValue: + description: |- + requiredValue is the value of a required claim. + Same as --oidc-required-claim flag. + Only string claim values are supported. + If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + Mutually exclusive with expression and message. + type: string + type: object + type: array + issuer: + description: issuer contains the basic OIDC provider connection + options. + properties: + audienceMatchPolicy: + description: |- + audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT. + Allowed values are: + 1. "MatchAny" when multiple audiences are specified and + 2. empty (or unset) or "MatchAny" when a single audience is specified. + + - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both). + + - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. 
The "aud" claim in the presented JWT must contain the single audience (and may contain others). + + For more nuanced audience validation, use claimValidationRules. + example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match. + type: string + audiences: + description: |- + audiences is the set of acceptable audiences the JWT must be issued to. + At least one of the entries must match the "aud" claim in presented JWTs. + Same value as the --oidc-client-id flag (though this field supports an array). + Required to be non-empty. + items: + type: string + type: array + certificateAuthority: + description: |- + certificateAuthority contains PEM-encoded certificate authority certificates + used to validate the connection when fetching discovery information. + If unset, the system verifier is used. + Same value as the content of the file referenced by the --oidc-ca-file flag. + type: string + discoveryURL: + description: |- + discoveryURL, if specified, overrides the URL used to fetch discovery + information instead of using "{url}/.well-known/openid-configuration". + The exact value specified is used, so "/.well-known/openid-configuration" + must be included in discoveryURL if needed. + + The "issuer" field in the fetched discovery information must match the "issuer.url" field + in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT. + This is for scenarios where the well-known and jwks endpoints are hosted at a different + location than the issuer (such as locally in the cluster). + + Example: + A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace' + and discovery information is available at '/.well-known/openid-configuration'. 
+ discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration" + certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate + must be set to 'oidc.oidc-namespace'. + + curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field) + { + issuer: "https://oidc.example.com" (.url field) + } + + discoveryURL must be different from url. + Required to be unique across all JWT authenticators. + Note that egress selection configuration is not used for this network connection. + type: string + url: + description: |- + url points to the issuer URL in a format https://url or https://url/path. + This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + Same value as the --oidc-issuer-url flag. + Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL. + Required to be unique across all JWT authenticators. + Note that egress selection configuration is not used for this network connection. + type: string + required: + - audiences + - url + type: object + userValidationRules: + description: |- + userValidationRules are rules that are applied to final user before completing authentication. + These allow invariants to be applied to incoming identities such as preventing the + use of the system: prefix that is commonly used by Kubernetes components. + The validation rules are logically ANDed together and must all return true for the validation to pass. + items: + description: UserValidationRule provides the configuration + for a single user info validation rule. + properties: + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must return true for the validation to pass. 
+ + CEL expressions have access to the contents of UserInfo, organized into CEL variable: + - 'user' - authentication.k8s.io/v1, Kind=UserInfo object + Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. + API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + message: + description: |- + message customizes the returned error message when rule returns false. + message is a literal string. + type: string + required: + - expression + type: object + type: array + required: + - claimMappings + - issuer + type: object + type: array + k8s: + description: |- + Kubernetes authentication configuration. + Enables authentication using Kubernetes service account tokens. + Useful for integrating with existing Kubernetes RBAC policies. + properties: + enabled: + default: false + description: |- + Enable Kubernetes authentication. + When enabled, clients can authenticate using Kubernetes service account tokens. + type: boolean + type: object + type: object + baseDomain: + description: |- + Base domain used to construct FQDNs for all service endpoints. + This domain will be used to generate the default hostnames for Routes, Ingresses, and certificates. + Example: "example.com" will generate endpoints like "grpc.example.com", "router.example.com" + pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + controller: + default: {} + description: |- + Controller configuration for the main Jumpstarter API and gRPC services. + The controller handles gRPC and REST API requests from clients and exporters. + properties: + authentication: + description: |- + Authentication configuration for client and exporter authentication. + Configures how clients and exporters can authenticate with Jumpstarter. 
+ Supports multiple authentication methods including internal tokens, Kubernetes tokens, and JWT. + properties: + internal: + description: |- + Internal authentication configuration. + Built-in authenticator that issues tokens for clients and exporters. + This is the simplest authentication method and is enabled by default. + properties: + enabled: + default: true + description: |- + Enable the internal authentication method. + When disabled, clients cannot use internal tokens for authentication. + type: boolean + prefix: + default: 'internal:' + description: |- + Prefix to add to the subject claim of issued tokens. + Helps distinguish internal tokens from other authentication methods. + Example: "internal:" will result in subjects like "internal:user123" + maxLength: 50 + type: string + tokenLifetime: + default: 43800h + description: |- + Token validity duration for issued tokens. + After this duration, tokens expire and must be renewed. + type: string + type: object + jwt: + description: |- + JWT authentication configuration. + Enables authentication using external JWT tokens from OIDC providers. + Supports multiple JWT authenticators for different identity providers. + items: + description: JWTAuthenticator provides the configuration + for a single JWT authenticator. + properties: + claimMappings: + description: claimMappings points claims of a token + to be treated as user attributes. + properties: + extra: + description: |- + extra represents an option for the extra attribute. + expression must produce a string or string array value. + If the value is empty, the extra mapping will not be present. 
+ + hard-coded extra key/value + - key: "foo" + valueExpression: "'bar'" + This will result in an extra attribute - foo: ["bar"] + + hard-coded key, value copying claim value + - key: "foo" + valueExpression: "claims.some_claim" + This will result in an extra attribute - foo: [value of some_claim] + + hard-coded key, value derived from claim value + - key: "admin" + valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""' + This will result in: + - if is_admin claim is present and true, extra attribute - admin: ["true"] + - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added + items: + description: ExtraMapping provides the configuration + for a single extra mapping. + properties: + key: + description: |- + key is a string to use as the extra attribute key. + key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid + subdomain as defined by RFC 1123. All characters trailing the first "/" must + be valid HTTP Path characters as defined by RFC 3986. + key must be lowercase. + Required to be unique. + type: string + valueExpression: + description: |- + valueExpression is a CEL expression to extract extra attribute value. + valueExpression must produce a string or string array value. + "", [], and null values are treated as the extra mapping not being present. + Empty string values contained within a string array are filtered out. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. 
+ + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + required: + - key + - valueExpression + type: object + type: array + groups: + description: |- + groups represents an option for the groups attribute. + The claim's value must be a string or string array claim. + If groups.claim is set, the prefix must be specified (and can be the empty string). + If groups.expression is set, the expression must produce a string or string array value. + "", [], and null values are treated as the group mapping not being present. + properties: + claim: + description: |- + claim is the JWT claim to use. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + + CEL expressions have access to the contents of the token claims, organized into CEL variable: + - 'claims' is a map of claim names to claim values. + For example, a variable named 'sub' can be accessed as 'claims.sub'. + Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and prefix. + type: string + prefix: + description: |- + prefix is prepended to claim's value to prevent clashes with existing names. + prefix needs to be set if claim is set and can be the empty string. + Mutually exclusive with expression. + type: string + type: object + uid: + description: |- + uid represents an option for the uid attribute. + Claim must be a singular string claim. + If uid.expression is set, the expression must produce a string value. + properties: + claim: + description: |- + claim is the JWT claim to use. + Either claim or expression must be set. + Mutually exclusive with expression. + type: string + expression: + description: |- + expression represents the expression which will be evaluated by CEL. 
+
+ CEL expressions have access to the contents of the token claims, organized into CEL variable:
+ - 'claims' is a map of claim names to claim values.
+ For example, a variable named 'sub' can be accessed as 'claims.sub'.
+ Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+
+ Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+
+ Mutually exclusive with claim.
+ type: string
+ type: object
+ username:
+ description: |-
+ username represents an option for the username attribute.
+ The claim's value must be a singular string.
+ Same as the --oidc-username-claim and --oidc-username-prefix flags.
+ If username.expression is set, the expression must produce a string value.
+ If username.expression uses 'claims.email', then 'claims.email_verified' must be used in
+ username.expression or extra[*].valueExpression or claimValidationRules[*].expression.
+ An example claim validation rule expression that matches the validation automatically
+ applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true) == true'. By explicitly comparing
+ the value to true, we let type-checking see the result will be a boolean, and to make sure a non-boolean email_verified
+ claim will be caught at runtime.
+
+ In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set,
+ the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly.
+ For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config.
+ For prefix:
+ (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config,
+ set username.prefix=""
+ (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "<value of --oidc-issuer-url>#". For the same
+ behavior using authentication config, set username.prefix="<value of --oidc-issuer-url>#"
+ (3) --oidc-username-prefix="<value>". For the same behavior using authentication config, set username.prefix="<value>"
+ properties:
+ claim:
+ description: |-
+ claim is the JWT claim to use.
+ Mutually exclusive with expression.
+ type: string
+ expression:
+ description: |-
+ expression represents the expression which will be evaluated by CEL.
+
+ CEL expressions have access to the contents of the token claims, organized into CEL variable:
+ - 'claims' is a map of claim names to claim values.
+ For example, a variable named 'sub' can be accessed as 'claims.sub'.
+ Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+
+ Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+
+ Mutually exclusive with claim and prefix.
+ type: string
+ prefix:
+ description: |-
+ prefix is prepended to claim's value to prevent clashes with existing names.
+ prefix needs to be set if claim is set and can be the empty string.
+ Mutually exclusive with expression.
+ type: string
+ type: object
+ required:
+ - username
+ type: object
+ claimValidationRules:
+ description: claimValidationRules are rules that are
+ applied to validate token claims to authenticate users.
+ items:
+ description: ClaimValidationRule provides the configuration
+ for a single claim validation rule.
+ properties:
+ claim:
+ description: |-
+ claim is the name of a required claim.
+ Same as --oidc-required-claim flag.
+ Only string claim keys are supported.
+ Mutually exclusive with expression and message.
+ type: string
+ expression:
+ description: |-
+ expression represents the expression which will be evaluated by CEL.
+ Must produce a boolean.
+
+ CEL expressions have access to the contents of the token claims, organized into CEL variable:
+ - 'claims' is a map of claim names to claim values.
+ For example, a variable named 'sub' can be accessed as 'claims.sub'.
+ Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + Must return true for the validation to pass. + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + + Mutually exclusive with claim and requiredValue. + type: string + message: + description: |- + message customizes the returned error message when expression returns false. + message is a literal string. + Mutually exclusive with claim and requiredValue. + type: string + requiredValue: + description: |- + requiredValue is the value of a required claim. + Same as --oidc-required-claim flag. + Only string claim values are supported. + If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + Mutually exclusive with expression and message. + type: string + type: object + type: array + issuer: + description: issuer contains the basic OIDC provider + connection options. + properties: + audienceMatchPolicy: + description: |- + audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT. + Allowed values are: + 1. "MatchAny" when multiple audiences are specified and + 2. empty (or unset) or "MatchAny" when a single audience is specified. + + - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both). + + - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others). + + For more nuanced audience validation, use claimValidationRules. + example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match. 
+ type: string + audiences: + description: |- + audiences is the set of acceptable audiences the JWT must be issued to. + At least one of the entries must match the "aud" claim in presented JWTs. + Same value as the --oidc-client-id flag (though this field supports an array). + Required to be non-empty. + items: + type: string + type: array + certificateAuthority: + description: |- + certificateAuthority contains PEM-encoded certificate authority certificates + used to validate the connection when fetching discovery information. + If unset, the system verifier is used. + Same value as the content of the file referenced by the --oidc-ca-file flag. + type: string + discoveryURL: + description: |- + discoveryURL, if specified, overrides the URL used to fetch discovery + information instead of using "{url}/.well-known/openid-configuration". + The exact value specified is used, so "/.well-known/openid-configuration" + must be included in discoveryURL if needed. + + The "issuer" field in the fetched discovery information must match the "issuer.url" field + in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT. + This is for scenarios where the well-known and jwks endpoints are hosted at a different + location than the issuer (such as locally in the cluster). + + Example: + A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace' + and discovery information is available at '/.well-known/openid-configuration'. + discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration" + certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate + must be set to 'oidc.oidc-namespace'. + + curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field) + { + issuer: "https://oidc.example.com" (.url field) + } + + discoveryURL must be different from url. + Required to be unique across all JWT authenticators. 
+ Note that egress selection configuration is not used for this network connection. + type: string + url: + description: |- + url points to the issuer URL in a format https://url or https://url/path. + This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + Same value as the --oidc-issuer-url flag. + Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL. + Required to be unique across all JWT authenticators. + Note that egress selection configuration is not used for this network connection. + type: string + required: + - audiences + - url + type: object + userValidationRules: + description: |- + userValidationRules are rules that are applied to final user before completing authentication. + These allow invariants to be applied to incoming identities such as preventing the + use of the system: prefix that is commonly used by Kubernetes components. + The validation rules are logically ANDed together and must all return true for the validation to pass. + items: + description: UserValidationRule provides the configuration + for a single user info validation rule. + properties: + expression: + description: |- + expression represents the expression which will be evaluated by CEL. + Must return true for the validation to pass. + + CEL expressions have access to the contents of UserInfo, organized into CEL variable: + - 'user' - authentication.k8s.io/v1, Kind=UserInfo object + Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. + API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io + + Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + type: string + message: + description: |- + message customizes the returned error message when rule returns false. + message is a literal string. 
+ type: string + required: + - expression + type: object + type: array + required: + - claimMappings + - issuer + type: object + type: array + k8s: + description: |- + Kubernetes authentication configuration. + Enables authentication using Kubernetes service account tokens. + Useful for integrating with existing Kubernetes RBAC policies. + properties: + enabled: + default: false + description: |- + Enable Kubernetes authentication. + When enabled, clients can authenticate using Kubernetes service account tokens. + type: boolean + type: object + type: object + exporterOptions: + description: |- + Exporter options configuration. + Controls how exporters connect and behave when communicating with the controller. + properties: + offlineTimeout: + default: 180s + description: |- + Offline timeout duration for exporters. + After this duration without communication, an exporter is considered offline. + This drives the online/offline status field of exporters, and offline exporters + won't be considered for leases. + type: string + type: object + grpc: + description: |- + gRPC configuration for controller endpoints. + Defines how controller gRPC services are exposed and configured. + properties: + endpoints: + description: |- + List of gRPC endpoints to expose. + Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + based on your cluster setup. Example: Use Route for OpenShift, Ingress for standard Kubernetes. + items: + description: |- + Endpoint defines a single endpoint configuration. + An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. + Multiple methods can be configured simultaneously for the same address. + properties: + address: + description: |- + Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. 
+ When optional, the address is used for certificate generation and DNS resolution. + Supports templating with $(replica) for replica-specific addresses. + Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + pattern: ^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$ + type: string + clusterIP: + description: |- + ClusterIP configuration for internal service access. + Creates a ClusterIP service for this endpoint. + Useful for internal service-to-service communication or when + using a different method to expose the service externally. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ClusterIP service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the ClusterIP service for this endpoint. + When disabled, no ClusterIP service will be created for this endpoint. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the ClusterIP service. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + ingress: + description: |- + Ingress configuration for standard Kubernetes clusters. + Creates an Ingress resource for this endpoint. + Requires an ingress controller to be installed. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the Kubernetes Ingress resource. + Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. + type: object + class: + default: default + description: |- + Ingress class name for the Kubernetes Ingress. + Specifies which ingress controller should handle this ingress. + type: string + enabled: + description: |- + Enable the Kubernetes Ingress for this endpoint. 
+ When disabled, no Ingress resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the Kubernetes Ingress resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + loadBalancer: + description: |- + LoadBalancer configuration for cloud environments. + Creates a LoadBalancer service for this endpoint. + Requires cloud provider support for LoadBalancer services. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the LoadBalancer service. + Useful for configuring cloud provider-specific load balancer options. + Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + type: object + enabled: + description: |- + Enable the LoadBalancer service for this endpoint. + When disabled, no LoadBalancer service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the LoadBalancer service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + Port number for the LoadBalancer service. + Must be a valid port number (1-65535). + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodeport: + description: |- + NodePort configuration for direct node access. + Exposes the service on a specific port on each node. + Useful for bare-metal or simple cluster setups. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the NodePort service. + Useful for configuring service-specific behavior and load balancer options. 
+ type: object + enabled: + description: |- + Enable the NodePort service for this endpoint. + When disabled, no NodePort service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the NodePort service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + NodePort port number to expose on each node. + Must be in the range 30000-32767 for most Kubernetes clusters. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: |- + Route configuration for OpenShift clusters. + Creates an OpenShift Route resource for this endpoint. + Only applicable in OpenShift environments. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the OpenShift Route resource. + Useful for configuring route-specific behavior and TLS settings. + type: object + enabled: + description: |- + Enable the OpenShift Route for this endpoint. + When disabled, no Route resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the OpenShift Route resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + type: object + type: array + keepalive: + description: |- + Keepalive configuration for gRPC connections. + Controls connection health checks and idle connection management. + Helps maintain stable connections in load-balanced environments. + properties: + intervalTime: + default: 10s + description: |- + Interval between keepalive pings. + How often to send keepalive pings to check connection health. 
This is important + to keep TCP gRPC connections alive when traversing load balancers and proxies. + type: string + maxConnectionAge: + description: |- + Maximum age of a connection before it is closed and recreated. + Helps prevent issues with long-lived connections. It defaults to infinity. + type: string + maxConnectionAgeGrace: + description: |- + Grace period for closing connections that exceed MaxConnectionAge. + Allows ongoing RPCs to complete before closing the connection. + type: string + maxConnectionIdle: + description: |- + Maximum time a connection can remain idle before being closed. + It defaults to infinity. + type: string + minTime: + default: 1s + description: |- + Minimum time between keepalives that the connection will accept, under this threshold + the other side will get a GOAWAY signal. + Prevents excessive keepalive traffic on the network. + type: string + permitWithoutStream: + default: true + description: |- + Allow keepalive pings even when there are no active RPC streams. + Useful for detecting connection issues in idle connections. + This is important to keep TCP gRPC connections alive when traversing + load balancers and proxies. + type: boolean + timeout: + default: 180s + description: |- + Timeout for keepalive ping acknowledgment. + If a ping is not acknowledged within this time, the connection is considered broken. + The default is high to avoid issues when the network on a exporter is overloaded, i.e. + during flashing. + type: string + type: object + tls: + description: |- + TLS configuration for secure gRPC communication. + Requires a Kubernetes secret containing the TLS certificate and private key. + If useCertManager is enabled, this secret will be automatically created. + See also: spec.useCertManager for automatic certificate management. + properties: + certSecret: + description: |- + Name of the Kubernetes secret containing the TLS certificate and private key. + The secret must contain 'tls.crt' and 'tls.key' keys. 
+ If useCertManager is enabled, this secret will be automatically created. + pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + type: object + type: object + image: + default: quay.io/jumpstarter-dev/jumpstarter-controller:latest + description: |- + Container image for the controller pods in 'registry/repository/image:tag' format. + If not specified, defaults to the latest stable version of the Jumpstarter controller. + type: string + imagePullPolicy: + default: IfNotPresent + description: |- + Image pull policy for the controller container. + Controls when the container image should be pulled from the registry. + enum: + - Always + - IfNotPresent + - Never + type: string + replicas: + default: 2 + description: |- + Number of controller replicas to run. + Must be a positive integer. Minimum recommended value is 2 for high availability. + format: int32 + minimum: 1 + type: integer + resources: + description: |- + Resource requirements for controller pods. + Defines CPU and memory requests and limits for each controller pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restApi: + description: |- + REST API configuration for HTTP-based clients. + Enables non-gRPC clients to interact with Jumpstarter for listing leases, + managing exporters, and creating new leases. Use this when you need HTTP/JSON access. + properties: + endpoints: + description: |- + List of REST API endpoints to expose. + Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + based on your cluster setup. + items: + description: |- + Endpoint defines a single endpoint configuration. + An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. + Multiple methods can be configured simultaneously for the same address. 
+ properties: + address: + description: |- + Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. + When optional, the address is used for certificate generation and DNS resolution. + Supports templating with $(replica) for replica-specific addresses. + Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + pattern: ^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$ + type: string + clusterIP: + description: |- + ClusterIP configuration for internal service access. + Creates a ClusterIP service for this endpoint. + Useful for internal service-to-service communication or when + using a different method to expose the service externally. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ClusterIP service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the ClusterIP service for this endpoint. + When disabled, no ClusterIP service will be created for this endpoint. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the ClusterIP service. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + ingress: + description: |- + Ingress configuration for standard Kubernetes clusters. + Creates an Ingress resource for this endpoint. + Requires an ingress controller to be installed. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the Kubernetes Ingress resource. + Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. 
+ type: object + class: + default: default + description: |- + Ingress class name for the Kubernetes Ingress. + Specifies which ingress controller should handle this ingress. + type: string + enabled: + description: |- + Enable the Kubernetes Ingress for this endpoint. + When disabled, no Ingress resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the Kubernetes Ingress resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + loadBalancer: + description: |- + LoadBalancer configuration for cloud environments. + Creates a LoadBalancer service for this endpoint. + Requires cloud provider support for LoadBalancer services. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the LoadBalancer service. + Useful for configuring cloud provider-specific load balancer options. + Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + type: object + enabled: + description: |- + Enable the LoadBalancer service for this endpoint. + When disabled, no LoadBalancer service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the LoadBalancer service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + Port number for the LoadBalancer service. + Must be a valid port number (1-65535). + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodeport: + description: |- + NodePort configuration for direct node access. + Exposes the service on a specific port on each node. + Useful for bare-metal or simple cluster setups. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the NodePort service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the NodePort service for this endpoint. + When disabled, no NodePort service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the NodePort service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + NodePort port number to expose on each node. + Must be in the range 30000-32767 for most Kubernetes clusters. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: |- + Route configuration for OpenShift clusters. + Creates an OpenShift Route resource for this endpoint. + Only applicable in OpenShift environments. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the OpenShift Route resource. + Useful for configuring route-specific behavior and TLS settings. + type: object + enabled: + description: |- + Enable the OpenShift Route for this endpoint. + When disabled, no Route resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the OpenShift Route resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + type: object + type: array + tls: + description: |- + TLS configuration for secure HTTP communication. + Requires a Kubernetes secret containing the TLS certificate and private key. 
+ properties: + certSecret: + description: |- + Name of the Kubernetes secret containing the TLS certificate and private key. + The secret must contain 'tls.crt' and 'tls.key' keys. + If useCertManager is enabled, this secret will be automatically created. + pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + type: object + type: object + type: object + routers: + default: {} + description: |- + Router configuration for the Jumpstarter router service. + Routers handle gRPC traffic routing and load balancing. + properties: + grpc: + description: |- + gRPC configuration for router endpoints. + Defines how router gRPC services are exposed and configured. + properties: + endpoints: + description: |- + List of gRPC endpoints to expose. + Each endpoint can use different networking methods (Route, Ingress, NodePort, or LoadBalancer) + based on your cluster setup. Example: Use Route for OpenShift, Ingress for standard Kubernetes. + items: + description: |- + Endpoint defines a single endpoint configuration. + An endpoint can use one or more networking methods: Route, Ingress, NodePort, or LoadBalancer. + Multiple methods can be configured simultaneously for the same address. + properties: + address: + description: |- + Address for this endpoint in the format "hostname", "hostname:port", "IPv4", "IPv4:port", "[IPv6]", or "[IPv6]:port". + Required for Route and Ingress endpoints. Optional for NodePort and LoadBalancer endpoints. + When optional, the address is used for certificate generation and DNS resolution. + Supports templating with $(replica) for replica-specific addresses. + Examples: "grpc.example.com", "grpc.example.com:9090", "192.168.1.1:8080", "[2001:db8::1]:8443", "router-$(replica).example.com" + pattern: ^(\[[0-9a-fA-F:\.]+\]|[0-9]+(\.[0-9]+){3}|[a-z0-9$]([a-z0-9\-\.\$\(\)]*[a-z0-9\)])?)(:[0-9]+)?$ + type: string + clusterIP: + description: |- + ClusterIP configuration for internal service access. + Creates a ClusterIP service for this endpoint. 
+ Useful for internal service-to-service communication or when + using a different method to expose the service externally. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the ClusterIP service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the ClusterIP service for this endpoint. + When disabled, no ClusterIP service will be created for this endpoint. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the ClusterIP service. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + ingress: + description: |- + Ingress configuration for standard Kubernetes clusters. + Creates an Ingress resource for this endpoint. + Requires an ingress controller to be installed. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the Kubernetes Ingress resource. + Useful for configuring ingress-specific behavior, TLS settings, and load balancer options. + type: object + class: + default: default + description: |- + Ingress class name for the Kubernetes Ingress. + Specifies which ingress controller should handle this ingress. + type: string + enabled: + description: |- + Enable the Kubernetes Ingress for this endpoint. + When disabled, no Ingress resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the Kubernetes Ingress resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + loadBalancer: + description: |- + LoadBalancer configuration for cloud environments. + Creates a LoadBalancer service for this endpoint. 
+ Requires cloud provider support for LoadBalancer services. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the LoadBalancer service. + Useful for configuring cloud provider-specific load balancer options. + Example: "service.beta.kubernetes.io/aws-load-balancer-type: nlb" + type: object + enabled: + description: |- + Enable the LoadBalancer service for this endpoint. + When disabled, no LoadBalancer service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the LoadBalancer service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + Port number for the LoadBalancer service. + Must be a valid port number (1-65535). + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodeport: + description: |- + NodePort configuration for direct node access. + Exposes the service on a specific port on each node. + Useful for bare-metal or simple cluster setups. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the NodePort service. + Useful for configuring service-specific behavior and load balancer options. + type: object + enabled: + description: |- + Enable the NodePort service for this endpoint. + When disabled, no NodePort service will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the NodePort service. + Useful for monitoring, cost allocation, and resource organization. + type: object + port: + description: |- + NodePort port number to expose on each node. 
+ Must be in the range 30000-32767 for most Kubernetes clusters. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: |- + Route configuration for OpenShift clusters. + Creates an OpenShift Route resource for this endpoint. + Only applicable in OpenShift environments. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations to add to the OpenShift Route resource. + Useful for configuring route-specific behavior and TLS settings. + type: object + enabled: + description: |- + Enable the OpenShift Route for this endpoint. + When disabled, no Route resource will be created for this endpoint. + When not specified, the operator will determine the best networking option for your cluster. + type: boolean + labels: + additionalProperties: + type: string + description: |- + Labels to add to the OpenShift Route resource. + Useful for monitoring, cost allocation, and resource organization. + type: object + type: object + type: object + type: array + keepalive: + description: |- + Keepalive configuration for gRPC connections. + Controls connection health checks and idle connection management. + Helps maintain stable connections in load-balanced environments. + properties: + intervalTime: + default: 10s + description: |- + Interval between keepalive pings. + How often to send keepalive pings to check connection health. This is important + to keep TCP gRPC connections alive when traversing load balancers and proxies. + type: string + maxConnectionAge: + description: |- + Maximum age of a connection before it is closed and recreated. + Helps prevent issues with long-lived connections. It defaults to infinity. + type: string + maxConnectionAgeGrace: + description: |- + Grace period for closing connections that exceed MaxConnectionAge. + Allows ongoing RPCs to complete before closing the connection. 
+ type: string + maxConnectionIdle: + description: |- + Maximum time a connection can remain idle before being closed. + It defaults to infinity. + type: string + minTime: + default: 1s + description: |- + Minimum time between keepalives that the connection will accept, under this threshold + the other side will get a GOAWAY signal. + Prevents excessive keepalive traffic on the network. + type: string + permitWithoutStream: + default: true + description: |- + Allow keepalive pings even when there are no active RPC streams. + Useful for detecting connection issues in idle connections. + This is important to keep TCP gRPC connections alive when traversing + load balancers and proxies. + type: boolean + timeout: + default: 180s + description: |- + Timeout for keepalive ping acknowledgment. + If a ping is not acknowledged within this time, the connection is considered broken. + The default is high to avoid issues when the network on a exporter is overloaded, i.e. + during flashing. + type: string + type: object + tls: + description: |- + TLS configuration for secure gRPC communication. + Requires a Kubernetes secret containing the TLS certificate and private key. + If useCertManager is enabled, this secret will be automatically created. + See also: spec.useCertManager for automatic certificate management. + properties: + certSecret: + description: |- + Name of the Kubernetes secret containing the TLS certificate and private key. + The secret must contain 'tls.crt' and 'tls.key' keys. + If useCertManager is enabled, this secret will be automatically created. + pattern: ^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$ + type: string + type: object + type: object + image: + default: quay.io/jumpstarter-dev/jumpstarter-controller:latest + description: |- + Container image for the router pods in 'registry/repository/image:tag' format. + If not specified, defaults to the latest stable version of the Jumpstarter router. 
+ type: string + imagePullPolicy: + default: IfNotPresent + description: |- + Image pull policy for the router container. + Controls when the container image should be pulled from the registry. + enum: + - Always + - IfNotPresent + - Never + type: string + replicas: + default: 3 + description: |- + Number of router replicas to run. + Must be a positive integer. Minimum recommended value is 3 for high availability. + format: int32 + minimum: 1 + type: integer + resources: + description: |- + Resource requirements for router pods. + Defines CPU and memory requests and limits for each router pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + topologySpreadConstraints: + description: |- + Topology spread constraints for router pod distribution. + Ensures router pods are distributed evenly across nodes and zones. + Useful for high availability and fault tolerance. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. 
+ A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + useCertManager: + default: true + description: |- + Enable automatic TLS certificate management using cert-manager. + When enabled, jumpstarter will interact with cert-manager to automatically provision + and renew TLS certificates for all endpoints. Requires cert-manager to be installed in the cluster. + type: boolean + type: object + status: + description: |- + JumpstarterStatus defines the observed state of Jumpstarter. + This field is currently empty but can be extended to include status information + such as deployment status, endpoint URLs, and health information. 
+ type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: leases.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Lease + listKind: LeaseList + plural: leases + singular: lease + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ended + name: Ended + type: boolean + - jsonPath: .spec.clientRef.name + name: Client + type: string + - jsonPath: .status.exporterRef.name + name: Exporter + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Lease is the Schema for the exporters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LeaseSpec defines the desired state of Lease + properties: + beginTime: + description: |- + Requested start time. If omitted, lease starts when exporter is acquired. + Immutable after lease starts (cannot change the past). + format: date-time + type: string + clientRef: + description: The client that is requesting the lease + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + duration: + description: |- + Duration of the lease. Must be positive when provided. + Can be omitted (nil) when both BeginTime and EndTime are provided, + in which case it's calculated as EndTime - BeginTime. + type: string + endTime: + description: |- + Requested end time. If specified with BeginTime, Duration is calculated. + Can be updated to extend or shorten active leases. + format: date-time + type: string + release: + description: The release flag requests the controller to end the lease + now + type: boolean + selector: + description: The selector for the exporter to be used + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - clientRef + - selector + type: object + status: + description: LeaseStatus defines the observed state of Lease + properties: + beginTime: + description: |- + If the lease has been acquired an exporter name is assigned + and then it can be used, it will be empty while still pending + format: date-time + type: string + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + endTime: + format: date-time + type: string + ended: + type: boolean + exporterRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + priority: + type: integer + spotAccess: + type: boolean + required: + - ended + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-controller-manager + namespace: jumpstarter-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-leader-election-role + namespace: jumpstarter-operator-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-jumpstarter-admin-role +rules: +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - '*' +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-jumpstarter-editor-role +rules: +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- 
apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-jumpstarter-viewer-role +rules: +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - get + - list + - watch +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: jumpstarter-operator-manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - services/status + verbs: + - get + - patch + - update +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments/status + verbs: + - get + - patch + - update +- apiGroups: + - config.openshift.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - clients + - exporteraccesspolicies + - exporters + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - clients/finalizers + - exporteraccesspolicies/finalizers + - exporters/finalizers + - leases/finalizers + verbs: + - update +- apiGroups: + - jumpstarter.dev + resources: + - clients/status + - exporteraccesspolicies/status + - exporters/status + - leases/status + verbs: + - get + - 
patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - get + - patch + - update +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/finalizers + verbs: + - update +- apiGroups: + - operator.jumpstarter.dev + resources: + - jumpstarters/status + verbs: + - get + - patch + - update +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - route.openshift.io + resources: + - routes/custom-host + verbs: + - create + - get + - patch + - update +- apiGroups: + - route.openshift.io + resources: + - routes/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: jumpstarter-operator-metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + 
labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-leader-election-rolebinding + namespace: jumpstarter-operator-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: jumpstarter-operator-leader-election-role +subjects: +- kind: ServiceAccount + name: jumpstarter-operator-controller-manager + namespace: jumpstarter-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + name: jumpstarter-operator-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: jumpstarter-operator-manager-role +subjects: +- kind: ServiceAccount + name: jumpstarter-operator-controller-manager + namespace: jumpstarter-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: jumpstarter-operator-metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: jumpstarter-operator-metrics-auth-role +subjects: +- kind: ServiceAccount + name: jumpstarter-operator-controller-manager + namespace: jumpstarter-operator-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager + name: jumpstarter-operator-controller-manager-metrics-service + namespace: jumpstarter-operator-system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager + name: jumpstarter-operator-controller-manager + namespace: 
jumpstarter-operator-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + app.kubernetes.io/name: jumpstarter-operator + control-plane: controller-manager + spec: + containers: + - args: + - --metrics-bind-address=:8443 + - --leader-elect + - --health-probe-bind-address=:8081 + command: + - /manager + image: quay.io/jumpstarter-dev/jumpstarter-operator:latest + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + ports: [] + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 10m + memory: 256Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + volumeMounts: [] + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: jumpstarter-operator-controller-manager + terminationGracePeriodSeconds: 10 + volumes: [] diff --git a/controller/deploy/operator/go.mod b/controller/deploy/operator/go.mod new file mode 100644 index 000000000..132f83489 --- /dev/null +++ b/controller/deploy/operator/go.mod @@ -0,0 +1,138 @@ +module github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator + +go 1.24.0 + +require ( + github.com/go-logr/logr v1.4.2 + github.com/jumpstarter-dev/jumpstarter-controller v0.7.1 + github.com/onsi/ginkgo/v2 v2.22.2 + github.com/onsi/gomega v1.36.2 + github.com/openshift/api v0.0.0-20251023135607-98e18dae8c7a + github.com/pmezard/go-difflib v1.0.0 + k8s.io/api v0.33.0 + k8s.io/apimachinery v0.33.0 + k8s.io/apiserver v0.33.0 + k8s.io/client-go v0.33.0 + sigs.k8s.io/controller-runtime v0.21.0 + sigs.k8s.io/yaml v1.4.0 +) + +replace 
github.com/jumpstarter-dev/jumpstarter-controller => ../../ + +require ( + cel.dev/expr v0.19.1 // indirect + filippo.io/bigmod v0.0.3 // indirect + filippo.io/keygen v0.0.0-20240718133620-7f162efbbd87 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.8.0 // indirect + github.com/bytedance/sonic v1.11.6 // indirect + github.com/bytedance/sonic/loader v0.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect + github.com/coreos/go-oidc v2.3.0+incompatible // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/gin-gonic/gin v1.10.0 // indirect + github.com/go-chi/chi/v5 v5.2.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.20.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + 
github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/securecookie v1.1.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/muhlemmer/gu v0.3.1 // indirect + github.com/muhlemmer/httpforwarded v0.1.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pquerna/cachecontrol v0.1.0 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/zitadel/logging v0.6.1 // indirect + github.com/zitadel/oidc/v3 v3.34.1 // indirect + github.com/zitadel/schema v1.3.0 // indirect + 
go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/arch v0.8.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.28.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250204164813-702378808489 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489 // indirect + google.golang.org/grpc v1.70.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.33.0 // indirect + k8s.io/component-base v0.33.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // 
indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect +) diff --git a/controller/deploy/operator/go.sum b/controller/deploy/operator/go.sum new file mode 100644 index 000000000..c2a19aa6f --- /dev/null +++ b/controller/deploy/operator/go.sum @@ -0,0 +1,352 @@ +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +filippo.io/bigmod v0.0.3 h1:qmdCFHmEMS+PRwzrW6eUrgA4Q3T8D6bRcjsypDMtWHM= +filippo.io/bigmod v0.0.3/go.mod h1:WxGvOYE0OUaBC2N112Dflb3CjOnMBuNRA2UWZc2UbPE= +filippo.io/keygen v0.0.0-20240718133620-7f162efbbd87 h1:HlcHAMbI9Xvw3aWnhPngghMl5AKE2GOvjmvSGOKzCcI= +filippo.io/keygen v0.0.0-20240718133620-7f162efbbd87/go.mod h1:nAs0+DyACEQGudhkTwlPC9atyqDYC7ZotgZR7D8OwXM= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bmatcuk/doublestar/v4 v4.8.0 h1:DSXtrypQddoug1459viM9X9D3dp1Z7993fw36I2kNcQ= +github.com/bmatcuk/doublestar/v4 v4.8.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= +github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= +github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/canonical/go-sp800.90a-drbg v0.0.0-20210314144037-6eeb1040d6c3 
h1:oe6fCvaEpkhyW3qAicT0TnGtyht/UrgvOwMcEgLb7Aw= +github.com/canonical/go-sp800.90a-drbg v0.0.0-20210314144037-6eeb1040d6c3/go.mod h1:qdP0gaj0QtgX2RUZhnlVrceJ+Qln8aSlDyJwelLLFeM= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= +github.com/coreos/go-oidc v2.3.0+incompatible h1:+5vEsrgprdLjjQ9FzIKAzQz1wwPD+83hQRfUIPh7rO0= +github.com/coreos/go-oidc v2.3.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod 
h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= +github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/go-chi/chi/v5 v5.2.0 h1:Aj1EtB0qR2Rdo2dG4O94RIU35w2lvQSj6BRA4+qwFL0= +github.com/go-chi/chi/v5 v5.2.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= +github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= 
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= 
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/muhlemmer/gu v0.3.1 h1:7EAqmFrW7n3hETvuAdmFmn4hS8W+z3LgKtrnow+YzNM= +github.com/muhlemmer/gu v0.3.1/go.mod h1:YHtHR+gxM+bKEIIs7Hmi9sPT3ZDUvTN/i88wQpZkrdM= +github.com/muhlemmer/httpforwarded v0.1.0 
h1:x4DLrzXdliq8mprgUMR0olDvHGkou5BJsK/vWUetyzY= +github.com/muhlemmer/httpforwarded v0.1.0/go.mod h1:yo9czKedo2pdZhoXe+yDkGVbU0TJ0q9oQ90BVoDEtw0= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/openshift/api v0.0.0-20251023135607-98e18dae8c7a h1:Xi0/4kyXnyvnml1FG7q4xNGsXOBLGMIadZg7SxS8PNk= +github.com/openshift/api v0.0.0-20251023135607-98e18dae8c7a/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod 
h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify 
v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y= +github.com/zitadel/logging v0.6.1/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow= +github.com/zitadel/oidc/v3 v3.34.1 h1:/rxx2HxEowd8Sdb8sxcRxTu9pLy3/TXBLrewKOUMTHA= +github.com/zitadel/oidc/v3 v3.34.1/go.mod h1:lhAdAP1iWAnpfWF8CWNiO6yKvGFtPMuAubPwP5JC7Ec= +github.com/zitadel/schema v1.3.0 
h1:kQ9W9tvIwZICCKWcMvCEweXET1OcOyGEuFbHs4o5kg0= +github.com/zitadel/schema v1.3.0/go.mod h1:NptN6mkBDFvERUCvZHlvWmmME+gmZ44xzwRXwhzsbtc= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 
h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= +golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20250204164813-702378808489 h1:fCuMM4fowGzigT89NCIsW57Pk9k2D12MMi2ODn+Nk+o= +google.golang.org/genproto/googleapis/api v0.0.0-20250204164813-702378808489/go.mod h1:iYONQfRdizDB8JJBybql13nArx91jcUk7zCXEsOofM4= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489 h1:5bKytslY8ViY0Cj/ewmRtrWHW64bNF03cAatUUFCdFI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= +gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= 
+k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= +k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= +k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= +k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= +k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk= +k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/json 
v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/controller/deploy/operator/hack/boilerplate.go.txt b/controller/deploy/operator/hack/boilerplate.go.txt new file mode 100644 index 000000000..221dcbe0b --- /dev/null +++ b/controller/deploy/operator/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/controller/deploy/operator/internal/controller/jumpstarter/compare.go b/controller/deploy/operator/internal/controller/jumpstarter/compare.go new file mode 100644 index 000000000..d22d2872a --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/compare.go @@ -0,0 +1,158 @@ +/* +Copyright 2025. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jumpstarter + +import ( + "fmt" + + "github.com/go-logr/logr" + "github.com/pmezard/go-difflib/difflib" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/equality" + "sigs.k8s.io/yaml" +) + +// deploymentNeedsUpdate checks if a deployment needs to be updated using K8s semantic equality. +func deploymentNeedsUpdate(existing, desired *appsv1.Deployment) bool { + // Compare labels (only if desired.Labels is non-nil) + if desired.Labels != nil && !equality.Semantic.DeepEqual(existing.Labels, desired.Labels) { + return true + } + + // Compare annotations (only if desired.Annotations is non-nil) + if desired.Annotations != nil && !equality.Semantic.DeepEqual(existing.Annotations, desired.Annotations) { + return true + } + + // Compare the entire Spec using K8s semantic equality (handles nil vs empty automatically) + return !equality.Semantic.DeepEqual(existing.Spec, desired.Spec) +} + +// configMapNeedsUpdate checks if a configmap needs to be updated using K8s semantic equality. 
+func configMapNeedsUpdate(existing, desired *corev1.ConfigMap, log logr.Logger) bool { + // Compare labels (only if desired.Labels is non-nil) + if desired.Labels != nil && !equality.Semantic.DeepEqual(existing.Labels, desired.Labels) { + return true + } + + // Compare annotations (only if desired.Annotations is non-nil) + if desired.Annotations != nil && !equality.Semantic.DeepEqual(existing.Annotations, desired.Annotations) { + return true + } + + // Compare data (only if desired.Data is non-nil) + if desired.Data != nil && !equality.Semantic.DeepEqual(existing.Data, desired.Data) { + return true + } + + // Compare binary data (only if desired.BinaryData is non-nil) + if desired.BinaryData != nil && !equality.Semantic.DeepEqual(existing.BinaryData, desired.BinaryData) { + return true + } + + return false +} + +// serviceAccountNeedsUpdate checks if a service account needs to be updated using K8s semantic equality. +func serviceAccountNeedsUpdate(existing, desired *corev1.ServiceAccount) bool { + // Compare labels (only if desired.Labels is non-nil) + if desired.Labels != nil && !equality.Semantic.DeepEqual(existing.Labels, desired.Labels) { + return true + } + + // Compare annotations (only if desired.Annotations is non-nil) + if desired.Annotations != nil && !equality.Semantic.DeepEqual(existing.Annotations, desired.Annotations) { + return true + } + + return false +} + +// roleNeedsUpdate checks if a role needs to be updated using K8s semantic equality. 
+func roleNeedsUpdate(existing, desired *rbacv1.Role) bool { + // Compare labels (only if desired.Labels is non-nil) + if desired.Labels != nil && !equality.Semantic.DeepEqual(existing.Labels, desired.Labels) { + return true + } + + // Compare annotations (only if desired.Annotations is non-nil) + if desired.Annotations != nil && !equality.Semantic.DeepEqual(existing.Annotations, desired.Annotations) { + return true + } + + // Compare rules (only if non-nil in desired) + if desired.Rules != nil && !equality.Semantic.DeepEqual(existing.Rules, desired.Rules) { + return true + } + + return false +} + +// roleBindingNeedsUpdate checks if a role binding needs to be updated using K8s semantic equality. +func roleBindingNeedsUpdate(existing, desired *rbacv1.RoleBinding) bool { + // Compare labels (only if desired.Labels is non-nil) + if desired.Labels != nil && !equality.Semantic.DeepEqual(existing.Labels, desired.Labels) { + return true + } + + // Compare annotations (only if desired.Annotations is non-nil) + if desired.Annotations != nil && !equality.Semantic.DeepEqual(existing.Annotations, desired.Annotations) { + return true + } + + // Compare subjects (only if non-nil in desired) + if desired.Subjects != nil && !equality.Semantic.DeepEqual(existing.Subjects, desired.Subjects) { + return true + } + + // Compare role ref (only if non-zero in desired) + if desired.RoleRef.Name != "" && !equality.Semantic.DeepEqual(existing.RoleRef, desired.RoleRef) { + return true + } + + return false +} + +// generateDiff creates a unified diff between existing and desired resources. +// It works with any Kubernetes resource type. +// Returns the diff string and any error encountered during serialization. 
+func generateDiff[T any](existing, desired *T) (string, error) { + // Serialize existing resource to YAML + existingYAML, err := yaml.Marshal(existing) + if err != nil { + return "", fmt.Errorf("failed to marshal existing resource: %w", err) + } + + // Serialize desired resource to YAML + desiredYAML, err := yaml.Marshal(desired) + if err != nil { + return "", fmt.Errorf("failed to marshal desired resource: %w", err) + } + + // Generate unified diff + diff := difflib.UnifiedDiff{ + A: difflib.SplitLines(string(existingYAML)), + B: difflib.SplitLines(string(desiredYAML)), + FromFile: "Existing", + ToFile: "Desired", + Context: 3, + } + + return difflib.GetUnifiedDiffString(diff) +} diff --git a/controller/deploy/operator/internal/controller/jumpstarter/endpoints/defaults.go b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/defaults.go new file mode 100644 index 000000000..830eb71a2 --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/defaults.go @@ -0,0 +1,85 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "fmt" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" +) + +// ensureEndpointServiceType ensures an endpoint has a service type enabled. +// If no service type is enabled, it auto-selects Route (if available), Ingress (if available), +// or ClusterIP as a fallback. 
+func ensureEndpointServiceType(endpoint *operatorv1alpha1.Endpoint, routeAvailable, ingressAvailable bool) { + // Skip if any service type is already enabled + if (endpoint.Route != nil && endpoint.Route.Enabled) || + (endpoint.Ingress != nil && endpoint.Ingress.Enabled) || + (endpoint.LoadBalancer != nil && endpoint.LoadBalancer.Enabled) || + (endpoint.NodePort != nil && endpoint.NodePort.Enabled) || + (endpoint.ClusterIP != nil && endpoint.ClusterIP.Enabled) { + return + } + + // Auto-select based on cluster capabilities, fallback to ClusterIP + if routeAvailable { + endpoint.Route = &operatorv1alpha1.RouteConfig{Enabled: true} + } else if ingressAvailable { + endpoint.Ingress = &operatorv1alpha1.IngressConfig{Enabled: true} + } else { + endpoint.ClusterIP = &operatorv1alpha1.ClusterIPConfig{Enabled: true} + } +} + +// ApplyEndpointDefaults generates default endpoints for a JumpstarterSpec +// based on the baseDomain and cluster capabilities (Route vs Ingress availability). +// It also ensures all existing endpoints have a service type enabled. 
+func ApplyEndpointDefaults(spec *operatorv1alpha1.JumpstarterSpec, routeAvailable, ingressAvailable bool) { + // Skip endpoint generation if no baseDomain is set + if spec.BaseDomain == "" { + return + } + + // Generate default controller gRPC endpoint if none specified + if len(spec.Controller.GRPC.Endpoints) == 0 { + endpoint := operatorv1alpha1.Endpoint{ + Address: fmt.Sprintf("grpc.%s", spec.BaseDomain), + } + ensureEndpointServiceType(&endpoint, routeAvailable, ingressAvailable) + spec.Controller.GRPC.Endpoints = []operatorv1alpha1.Endpoint{endpoint} + } else { + // Ensure existing endpoints have a service type enabled + for i := range spec.Controller.GRPC.Endpoints { + ensureEndpointServiceType(&spec.Controller.GRPC.Endpoints[i], routeAvailable, ingressAvailable) + } + } + + // Generate default router gRPC endpoints if none specified + if len(spec.Routers.GRPC.Endpoints) == 0 { + endpoint := operatorv1alpha1.Endpoint{ + // Use $(replica) placeholder for per-replica addresses + Address: fmt.Sprintf("router-$(replica).%s", spec.BaseDomain), + } + ensureEndpointServiceType(&endpoint, routeAvailable, ingressAvailable) + spec.Routers.GRPC.Endpoints = []operatorv1alpha1.Endpoint{endpoint} + } else { + // Ensure existing endpoints have a service type enabled + for i := range spec.Routers.GRPC.Endpoints { + ensureEndpointServiceType(&spec.Routers.GRPC.Endpoints[i], routeAvailable, ingressAvailable) + } + } +} diff --git a/controller/deploy/operator/internal/controller/jumpstarter/endpoints/defaults_test.go b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/defaults_test.go new file mode 100644 index 000000000..ce5739de2 --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/defaults_test.go @@ -0,0 +1,165 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoints

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1"
)

// Ginkgo specs covering endpoint-default generation: which endpoints are
// created from baseDomain and which service type is auto-selected from the
// cluster's Route/Ingress availability.
var _ = Describe("ApplyEndpointDefaults", func() {
	Context("when baseDomain is empty", func() {
		It("should skip endpoint generation", func() {
			spec := &operatorv1alpha1.JumpstarterSpec{BaseDomain: ""}

			ApplyEndpointDefaults(spec, true, true)

			Expect(spec.Controller.GRPC.Endpoints).To(BeEmpty())
			Expect(spec.Routers.GRPC.Endpoints).To(BeEmpty())
		})
	})

	Context("when baseDomain is set and no endpoints exist", func() {
		It("should generate controller endpoint with Route when available", func() {
			spec := &operatorv1alpha1.JumpstarterSpec{BaseDomain: "example.com"}

			ApplyEndpointDefaults(spec, true, true)

			Expect(spec.Controller.GRPC.Endpoints).To(HaveLen(1))
			Expect(spec.Controller.GRPC.Endpoints[0].Address).To(Equal("grpc.example.com"))
			Expect(spec.Controller.GRPC.Endpoints[0].Route).NotTo(BeNil())
			Expect(spec.Controller.GRPC.Endpoints[0].Route.Enabled).To(BeTrue())
		})

		It("should generate controller endpoint with Ingress when Route unavailable", func() {
			spec := &operatorv1alpha1.JumpstarterSpec{BaseDomain: "example.com"}

			ApplyEndpointDefaults(spec, false, true)

			Expect(spec.Controller.GRPC.Endpoints).To(HaveLen(1))
			Expect(spec.Controller.GRPC.Endpoints[0].Ingress).NotTo(BeNil())
			Expect(spec.Controller.GRPC.Endpoints[0].Ingress.Enabled).To(BeTrue())
		})

		It("should fallback to ClusterIP when neither Route nor Ingress available", func() {
			spec := &operatorv1alpha1.JumpstarterSpec{BaseDomain: "example.com"}

			ApplyEndpointDefaults(spec, false, false)

			Expect(spec.Controller.GRPC.Endpoints).To(HaveLen(1))
			Expect(spec.Controller.GRPC.Endpoints[0].ClusterIP).NotTo(BeNil())
			Expect(spec.Controller.GRPC.Endpoints[0].ClusterIP.Enabled).To(BeTrue())
		})

		It("should generate router endpoint with $(replica) placeholder", func() {
			spec := &operatorv1alpha1.JumpstarterSpec{BaseDomain: "example.com"}

			ApplyEndpointDefaults(spec, true, true)

			Expect(spec.Routers.GRPC.Endpoints).To(HaveLen(1))
			Expect(spec.Routers.GRPC.Endpoints[0].Address).To(Equal("router-$(replica).example.com"))
			Expect(spec.Routers.GRPC.Endpoints[0].Route).NotTo(BeNil())
			Expect(spec.Routers.GRPC.Endpoints[0].Route.Enabled).To(BeTrue())
		})
	})

	Context("when endpoints already exist", func() {
		It("should not override existing endpoints", func() {
			spec := &operatorv1alpha1.JumpstarterSpec{
				BaseDomain: "example.com",
				Controller: operatorv1alpha1.ControllerConfig{
					GRPC: operatorv1alpha1.GRPCConfig{
						Endpoints: []operatorv1alpha1.Endpoint{
							{Address: "custom.example.com", ClusterIP: &operatorv1alpha1.ClusterIPConfig{Enabled: true}},
						},
					},
				},
			}

			ApplyEndpointDefaults(spec, true, true)

			Expect(spec.Controller.GRPC.Endpoints).To(HaveLen(1))
			Expect(spec.Controller.GRPC.Endpoints[0].Address).To(Equal("custom.example.com"))
		})

		It("should ensure existing endpoints have a service type enabled", func() {
			spec := &operatorv1alpha1.JumpstarterSpec{
				BaseDomain: "example.com",
				Controller: operatorv1alpha1.ControllerConfig{
					GRPC: operatorv1alpha1.GRPCConfig{
						Endpoints: []operatorv1alpha1.Endpoint{
							{Address: "custom.example.com"}, // No service type
						},
					},
				},
			}

			ApplyEndpointDefaults(spec, true, true)

			// Should auto-select Route since it's available
			Expect(spec.Controller.GRPC.Endpoints[0].Route).NotTo(BeNil())
			Expect(spec.Controller.GRPC.Endpoints[0].Route.Enabled).To(BeTrue())
		})
	})
})

// Specs for the single-endpoint helper: existing enabled types win; otherwise
// Route > Ingress > ClusterIP.
var _ = Describe("ensureEndpointServiceType", func() {
	Context("when endpoint already has a service type enabled", func() {
		It("should not modify the endpoint", func() {
			endpoint := &operatorv1alpha1.Endpoint{
				NodePort: &operatorv1alpha1.NodePortConfig{Enabled: true},
			}

			ensureEndpointServiceType(endpoint, true, true)

			// Should remain NodePort, not changed to Route
			Expect(endpoint.NodePort.Enabled).To(BeTrue())
			Expect(endpoint.Route).To(BeNil())
		})
	})

	Context("when no service type is enabled", func() {
		It("should auto-select Route when available", func() {
			endpoint := &operatorv1alpha1.Endpoint{}

			ensureEndpointServiceType(endpoint, true, true)

			Expect(endpoint.Route).NotTo(BeNil())
			Expect(endpoint.Route.Enabled).To(BeTrue())
		})

		It("should auto-select Ingress when Route unavailable", func() {
			endpoint := &operatorv1alpha1.Endpoint{}

			ensureEndpointServiceType(endpoint, false, true)

			Expect(endpoint.Ingress).NotTo(BeNil())
			Expect(endpoint.Ingress.Enabled).To(BeTrue())
		})

		It("should fallback to ClusterIP when neither available", func() {
			endpoint := &operatorv1alpha1.Endpoint{}

			ensureEndpointServiceType(endpoint, false, false)

			Expect(endpoint.ClusterIP).NotTo(BeNil())
			Expect(endpoint.ClusterIP.Enabled).To(BeTrue())
		})
	})
})
diff --git a/controller/deploy/operator/internal/controller/jumpstarter/endpoints/discovery.go b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/discovery.go
new file mode 100644
index 000000000..8bbb084ef
--- /dev/null
+++ b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/discovery.go
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// discoverAPIResource checks if a specific API resource is available in the cluster +// groupVersion should be in the format "group/version" (e.g., "networking.k8s.io/v1", "route.openshift.io/v1") +// kind is the resource kind to look for (e.g., "Ingress", "Route") +func discoverAPIResource(config *rest.Config, groupVersion, kind string) bool { + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + log.Log.Error(err, "Failed to create discovery client", + "groupVersion", groupVersion, + "kind", kind) + return false + } + + apiResourceList, err := discoveryClient.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + // API group not found - resource not available + return false + } + + for _, resource := range apiResourceList.APIResources { + if resource.Kind == kind { + return true + } + } + + return false +} + +// detectOpenShiftBaseDomain attempts to detect the cluster's base domain from OpenShift's +// ingresses.config.openshift.io/cluster resource. Returns empty string if not available. 
+func detectOpenShiftBaseDomain(config *rest.Config) string { + logger := log.Log.WithName("basedomain-detection") + + // Create dynamic client for unstructured access to OpenShift config API + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + logger.Error(err, "Failed to create dynamic client for baseDomain detection") + return "" + } + + // Define the GVR for ingresses.config.openshift.io + ingressGVR := schema.GroupVersionResource{ + Group: "config.openshift.io", + Version: "v1", + Resource: "ingresses", + } + + // Get the cluster-scoped "cluster" ingress config + ingressConfig, err := dynamicClient.Resource(ingressGVR).Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + // This is expected on non-OpenShift clusters, log at debug level + logger.V(1).Info("Could not fetch OpenShift ingress config (expected on non-OpenShift clusters)", "error", err.Error()) + return "" + } + + // Extract spec.domain from the unstructured object + domain, found, err := unstructured.NestedString(ingressConfig.Object, "spec", "domain") + if err != nil || !found || domain == "" { + logger.Info("OpenShift ingress config found but spec.domain not available") + return "" + } + + logger.Info("Auto-detected OpenShift cluster domain", "domain", domain) + return domain +} diff --git a/controller/deploy/operator/internal/controller/jumpstarter/endpoints/discovery_test.go b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/discovery_test.go new file mode 100644 index 000000000..f60392e82 --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/discovery_test.go @@ -0,0 +1,165 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoints

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"

	operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1"
)

// NOTE: these specs rely on suite-level globals (k8sClient, ctx, cfg) that are
// presumably set up in the package's suite file — not visible here; confirm.

// createTestJumpstarterSpec creates a JumpstarterSpec with the given baseDomain for testing
func createTestJumpstarterSpec(baseDomain string) *operatorv1alpha1.JumpstarterSpec {
	return &operatorv1alpha1.JumpstarterSpec{
		BaseDomain: baseDomain,
	}
}

// createOpenShiftIngressConfig creates an OpenShift Ingress cluster config for testing
func createOpenShiftIngressConfig(domain string) *unstructured.Unstructured {
	ingress := &unstructured.Unstructured{}
	ingress.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   "config.openshift.io",
		Version: "v1",
		Kind:    "Ingress",
	})
	ingress.SetName("cluster")
	ingress.Object["spec"] = map[string]interface{}{
		"domain": domain,
	}
	return ingress
}

var _ = Describe("detectOpenShiftBaseDomain", func() {
	// Note: These tests require OpenShift CRDs to be available in the test environment.
	// They will be skipped if the CRDs are not present, which is expected in non-OpenShift environments.

	Context("when OpenShift is available", func() {
		BeforeEach(func() {
			// Check if OpenShift CRDs are available
			ingress := createOpenShiftIngressConfig("test-check.apps.example.com")
			err := k8sClient.Create(ctx, ingress)
			if err != nil {
				Skip("Skipping OpenShift baseDomain auto-detection tests: OpenShift CRDs not available in test environment")
			}
			Expect(k8sClient.Delete(ctx, ingress)).To(Succeed())
		})

		Context("when OpenShift Ingress cluster config exists", func() {
			It("should successfully auto-detect baseDomain", func() {
				ingress := createOpenShiftIngressConfig("apps.example.com")
				Expect(k8sClient.Create(ctx, ingress)).To(Succeed())
				DeferCleanup(func() { _ = k8sClient.Delete(ctx, ingress) })

				Expect(detectOpenShiftBaseDomain(cfg)).To(Equal("apps.example.com"))
			})
		})

		Context("when OpenShift Ingress cluster config has empty domain", func() {
			It("should return empty string", func() {
				ingress := createOpenShiftIngressConfig("")
				Expect(k8sClient.Create(ctx, ingress)).To(Succeed())
				DeferCleanup(func() { _ = k8sClient.Delete(ctx, ingress) })

				Expect(detectOpenShiftBaseDomain(cfg)).To(Equal(""))
			})
		})

		Context("when OpenShift Ingress cluster config has no spec.domain", func() {
			It("should return empty string", func() {
				// Create a mock OpenShift Ingress cluster config without domain field
				ingress := &unstructured.Unstructured{}
				ingress.SetGroupVersionKind(schema.GroupVersionKind{
					Group:   "config.openshift.io",
					Version: "v1",
					Kind:    "Ingress",
				})
				ingress.SetName("cluster")
				ingress.Object["spec"] = map[string]interface{}{}

				Expect(k8sClient.Create(ctx, ingress)).To(Succeed())
				DeferCleanup(func() { _ = k8sClient.Delete(ctx, ingress) })

				Expect(detectOpenShiftBaseDomain(cfg)).To(Equal(""))
			})
		})
	})

	Context("when OpenShift Ingress cluster config does not exist", func() {
		It("should return empty string", func() {
			// Try to auto-detect when no Ingress config exists
			// This test will work even without OpenShift CRDs because it just checks the fallback behavior
			detectedDomain := detectOpenShiftBaseDomain(cfg)
			Expect(detectedDomain).To(Equal(""))
		})
	})
})

var _ = Describe("DefaultBaseDomain in Reconciler", func() {
	Context("when baseDomain is auto-detected", func() {
		It("should apply default baseDomain in ApplyDefaults when spec.BaseDomain is empty", func() {
			reconciler := NewReconciler(k8sClient, k8sClient.Scheme(), cfg)

			// Manually set a default baseDomain for testing
			reconciler.DefaultBaseDomain = "apps.example.com"

			// Create a spec with empty baseDomain
			spec := createTestJumpstarterSpec("")

			// Apply defaults with a namespace
			reconciler.ApplyDefaults(spec, "test-namespace")

			// Should use the default baseDomain with namespace prefix
			Expect(spec.BaseDomain).To(Equal("jumpstarter.test-namespace.apps.example.com"))
		})

		It("should not override user-provided baseDomain", func() {
			reconciler := NewReconciler(k8sClient, k8sClient.Scheme(), cfg)

			// Set a default baseDomain
			reconciler.DefaultBaseDomain = "apps.example.com"

			// Create a spec with user-provided baseDomain
			spec := createTestJumpstarterSpec("user.custom.domain")

			// Apply defaults
			reconciler.ApplyDefaults(spec, "test-namespace")

			// Should keep the user-provided baseDomain
			Expect(spec.BaseDomain).To(Equal("user.custom.domain"))
		})

		It("should not set baseDomain when DefaultBaseDomain is empty", func() {
			reconciler := NewReconciler(k8sClient, k8sClient.Scheme(), cfg)

			// No default baseDomain set (simulating non-OpenShift cluster)
			reconciler.DefaultBaseDomain = ""

			// Create a spec with empty baseDomain
			spec := createTestJumpstarterSpec("")

			// Apply defaults
			reconciler.ApplyDefaults(spec, "test-namespace")

			// baseDomain should remain empty
			Expect(spec.BaseDomain).To(Equal(""))
		})
	})
})
diff --git a/controller/deploy/operator/internal/controller/jumpstarter/endpoints/endpoints.go
b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/endpoints.go
new file mode 100644
index 000000000..1a679dd0d
--- /dev/null
+++ b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/endpoints.go
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoints

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1"
	"github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/utils"
)

// Reconciler provides endpoint reconciliation functionality
type Reconciler struct {
	Client client.Client
	Scheme *runtime.Scheme
	// IngressAvailable records whether networking.k8s.io/v1 Ingress is served
	// by the cluster, discovered once in NewReconciler.
	IngressAvailable bool
	// RouteAvailable records whether route.openshift.io/v1 Route is served.
	RouteAvailable bool
	DefaultBaseDomain string // Default baseDomain auto-detected from cluster (e.g., OpenShift ingress config)
}

// NewReconciler creates a new endpoint reconciler
func NewReconciler(client client.Client, scheme *runtime.Scheme, config *rest.Config) *Reconciler {
	log := logf.Log.WithName("endpoints-reconciler")

	// Discover API availability at initialization
	// NOTE(review): discovery runs only once here; capability changes after
	// startup (e.g. installing the Route CRD) are not picked up until the
	// operator restarts — confirm this is acceptable.
	ingressAvailable := discoverAPIResource(config, "networking.k8s.io/v1", "Ingress")
	routeAvailable := discoverAPIResource(config, "route.openshift.io/v1", "Route")

	// Attempt to auto-detect default baseDomain on OpenShift clusters
	var defaultBaseDomain string
	if routeAvailable {
		defaultBaseDomain = detectOpenShiftBaseDomain(config)
	}

	log.Info("API discovery completed",
		"ingressAvailable", ingressAvailable,
		"routeAvailable", routeAvailable,
		"defaultBaseDomain", defaultBaseDomain)

	return &Reconciler{
		Client:            client,
		Scheme:            scheme,
		IngressAvailable:  ingressAvailable,
		RouteAvailable:    routeAvailable,
		DefaultBaseDomain: defaultBaseDomain,
	}
}

// ApplyDefaults applies endpoint defaults to a JumpstarterSpec using the
// reconciler's discovered cluster capabilities (Route vs Ingress availability).
// If baseDomain is not provided in the spec, it will generate one using the pattern
// jumpstarter.$namespace.$clusterDomain (auto-detected from OpenShift cluster config).
func (r *Reconciler) ApplyDefaults(spec *operatorv1alpha1.JumpstarterSpec, namespace string) {
	// Only synthesize a baseDomain when the user left it empty AND the
	// cluster provided a default; otherwise the spec wins.
	if spec.BaseDomain == "" && r.DefaultBaseDomain != "" {
		spec.BaseDomain = fmt.Sprintf("jumpstarter.%s.%s", namespace, r.DefaultBaseDomain)
	}
	ApplyEndpointDefaults(spec, r.RouteAvailable, r.IngressAvailable)
}

// createOrUpdateService creates or updates a service with proper handling of immutable fields
// and owner references. This is the unified service creation method.
+func (r *Reconciler) createOrUpdateService(ctx context.Context, service *corev1.Service, owner metav1.Object) error { + log := logf.FromContext(ctx) + + existingService := &corev1.Service{} + existingService.Name = service.Name + existingService.Namespace = service.Namespace + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, existingService, func() error { + // Preserve immutable fields if service already exists + if existingService.CreationTimestamp.IsZero() { + // Service is being created, copy all fields from desired service + existingService.Spec = service.Spec + existingService.Labels = service.Labels + existingService.Annotations = service.Annotations + return controllerutil.SetControllerReference(owner, existingService, r.Scheme) + + } else { + // Preserve existing NodePorts to prevent "port already allocated" errors + if service.Spec.Type == corev1.ServiceTypeNodePort || service.Spec.Type == corev1.ServiceTypeLoadBalancer { + for i := range existingService.Spec.Ports { + if existingService.Spec.Ports[i].NodePort != 0 && i < len(service.Spec.Ports) { + service.Spec.Ports[i].NodePort = existingService.Spec.Ports[i].NodePort + } + } + } + + // Update all mutable fields + if service.Spec.LoadBalancerClass != nil && *service.Spec.LoadBalancerClass != "" { + existingService.Spec.LoadBalancerClass = service.Spec.LoadBalancerClass + } + if service.Spec.ExternalTrafficPolicy != "" { + existingService.Spec.ExternalTrafficPolicy = service.Spec.ExternalTrafficPolicy + } + + existingService.Spec.Ports = service.Spec.Ports + existingService.Spec.Selector = service.Spec.Selector + existingService.Spec.Type = service.Spec.Type + existingService.Labels = service.Labels + existingService.Annotations = service.Annotations + return controllerutil.SetControllerReference(owner, existingService, r.Scheme) + } + }) + + if err != nil { + log.Error(err, "Failed to reconcile service", + "name", service.Name, + "namespace", service.Namespace, + "type", service.Spec.Type) + 
return err + } + + log.Info("Service reconciled", + "name", service.Name, + "namespace", service.Namespace, + "type", service.Spec.Type, + "selector", service.Spec.Selector, + "operation", op) + + return nil +} + +// ReconcileControllerEndpoint reconciles a controller endpoint service with proper pod selector +// This function creates a separate service for each enabled service type (ClusterIP, NodePort, LoadBalancer) +func (r *Reconciler) ReconcileControllerEndpoint(ctx context.Context, owner metav1.Object, endpoint *operatorv1alpha1.Endpoint, servicePort corev1.ServicePort) error { + // Controller pods have fixed labels: app=jumpstarter-controller + // We need to create a service with selector matching those labels + baseLabels := map[string]string{ + "component": "controller", + "app": "jumpstarter-controller", + "controller": owner.GetName(), + } + + // Pod selector for controller pods + podSelector := map[string]string{ + "app": "jumpstarter-controller", + } + + // Create a service for each enabled service type + // This allows multiple service types to coexist for the same endpoint + // Note: ClusterIP uses no suffix (most common for in-cluster communication) + // LoadBalancer uses "-lb" suffix, NodePort uses "-np" suffix + + // Ingress resource (uses ClusterIP service) + if endpoint.Ingress != nil && endpoint.Ingress.Enabled { + serviceName := servicePort.Name + // Create the Ingress resource pointing to the ClusterIP service + if err := r.createIngressForEndpoint(ctx, owner, serviceName, servicePort.Port, endpoint, baseLabels); err != nil { + return err + } + } + + // Route resource (uses ClusterIP service) + if endpoint.Route != nil && endpoint.Route.Enabled { + serviceName := servicePort.Name + // Create the Route resource pointing to the ClusterIP service + if err := r.createRouteForEndpoint(ctx, owner, serviceName, servicePort.Port, endpoint, baseLabels); err != nil { + return err + } + } + + // LoadBalancer service + if endpoint.LoadBalancer != nil && 
endpoint.LoadBalancer.Enabled { + if err := r.createService(ctx, owner, servicePort, "-lb", corev1.ServiceTypeLoadBalancer, + podSelector, baseLabels, endpoint.LoadBalancer.Annotations, endpoint.LoadBalancer.Labels); err != nil { + return err + } + } + + // NodePort service + if endpoint.NodePort != nil && endpoint.NodePort.Enabled { + if err := r.createService(ctx, owner, servicePort, "-np", corev1.ServiceTypeNodePort, + podSelector, baseLabels, endpoint.NodePort.Annotations, endpoint.NodePort.Labels); err != nil { + return err + } + } + + // ClusterIP service (no suffix for cleaner in-cluster service names) + // Create ClusterIP if explicitly enabled OR if Ingress/Route need it + if (endpoint.ClusterIP != nil && endpoint.ClusterIP.Enabled) || + (endpoint.Ingress != nil && endpoint.Ingress.Enabled) || + (endpoint.Route != nil && endpoint.Route.Enabled) { + // Merge annotations and labels from ClusterIP config if present + var annotations, labels map[string]string + if endpoint.ClusterIP != nil { + annotations = endpoint.ClusterIP.Annotations + labels = endpoint.ClusterIP.Labels + } + if err := r.createService(ctx, owner, servicePort, "", corev1.ServiceTypeClusterIP, + podSelector, baseLabels, annotations, labels); err != nil { + return err + } + } + + return nil +} + +// ReconcileRouterReplicaEndpoint reconciles service, ingress, and route for a specific router replica endpoint +// This function creates a separate service for each enabled service type (ClusterIP, NodePort, LoadBalancer) +func (r *Reconciler) ReconcileRouterReplicaEndpoint(ctx context.Context, owner metav1.Object, replicaIndex int32, endpointIdx int, endpoint *operatorv1alpha1.Endpoint, servicePort corev1.ServicePort) error { + // IMPORTANT: The pod selector must match the actual pod labels + // Router pods have label: app: jumpstarter-router-0 (for replica 0) + baseAppLabel := fmt.Sprintf("%s-router-%d", owner.GetName(), replicaIndex) + + baseLabels := map[string]string{ + "component": "router", + 
"router": owner.GetName(), + "router-index": fmt.Sprintf("%d", replicaIndex), + "endpoint-idx": fmt.Sprintf("%d", endpointIdx), + } + + // Pod selector - this MUST match the deployment's pod template labels + podSelector := map[string]string{ + "app": baseAppLabel, // e.g., "jumpstarter-router-0" + } + + // Create a service for each enabled service type + // This allows multiple service types to coexist for the same endpoint + // Note: ClusterIP uses no suffix (most common for in-cluster communication) + // LoadBalancer uses "-lb" suffix, NodePort uses "-np" suffix + + // Ingress resource (uses ClusterIP service) + if endpoint.Ingress != nil && endpoint.Ingress.Enabled { + serviceName := servicePort.Name + // Create the Ingress resource pointing to the ClusterIP service + if err := r.createIngressForEndpoint(ctx, owner, serviceName, servicePort.Port, endpoint, baseLabels); err != nil { + return err + } + } + + // Route resource (uses ClusterIP service) + if endpoint.Route != nil && endpoint.Route.Enabled { + serviceName := servicePort.Name + // Create the Route resource pointing to the ClusterIP service + if err := r.createRouteForEndpoint(ctx, owner, serviceName, servicePort.Port, endpoint, baseLabels); err != nil { + return err + } + } + + // LoadBalancer service + if endpoint.LoadBalancer != nil && endpoint.LoadBalancer.Enabled { + if err := r.createService(ctx, owner, servicePort, "-lb", corev1.ServiceTypeLoadBalancer, + podSelector, baseLabels, endpoint.LoadBalancer.Annotations, endpoint.LoadBalancer.Labels); err != nil { + return err + } + } + + // NodePort service + if endpoint.NodePort != nil && endpoint.NodePort.Enabled { + if err := r.createService(ctx, owner, servicePort, "-np", corev1.ServiceTypeNodePort, + podSelector, baseLabels, endpoint.NodePort.Annotations, endpoint.NodePort.Labels); err != nil { + return err + } + } + + // ClusterIP service (no suffix for cleaner in-cluster service names) + // Create ClusterIP if explicitly enabled OR if 
Ingress/Route need it + if (endpoint.ClusterIP != nil && endpoint.ClusterIP.Enabled) || + (endpoint.Ingress != nil && endpoint.Ingress.Enabled) || + (endpoint.Route != nil && endpoint.Route.Enabled) { + // Merge annotations and labels from ClusterIP config if present + var annotations, labels map[string]string + if endpoint.ClusterIP != nil { + annotations = endpoint.ClusterIP.Annotations + labels = endpoint.ClusterIP.Labels + } + if err := r.createService(ctx, owner, servicePort, "", corev1.ServiceTypeClusterIP, + podSelector, baseLabels, annotations, labels); err != nil { + return err + } + } + + return nil +} + +// createService creates or updates a single service with the specified type and suffix +// This is the unified service creation method that uses createOrUpdateService internally +func (r *Reconciler) createService(ctx context.Context, owner metav1.Object, servicePort corev1.ServicePort, + nameSuffix string, serviceType corev1.ServiceType, podSelector map[string]string, + baseLabels map[string]string, annotations map[string]string, extraLabels map[string]string) error { + + // Build service name with suffix to avoid conflicts + serviceName := servicePort.Name + nameSuffix + + // Merge labels (extra labels take precedence) + serviceLabels := utils.MergeMaps(baseLabels, extraLabels) + + // Ensure annotations map is initialized + if annotations == nil { + annotations = make(map[string]string) + } + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: owner.GetNamespace(), + Labels: serviceLabels, + Annotations: annotations, + }, + Spec: corev1.ServiceSpec{ + Selector: podSelector, // Use the provided pod selector map + Ports: []corev1.ServicePort{servicePort}, + Type: serviceType, + }, + } + + return r.createOrUpdateService(ctx, service, owner) +} diff --git a/controller/deploy/operator/internal/controller/jumpstarter/endpoints/endpoints_test.go 
b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/endpoints_test.go new file mode 100644 index 000000000..4dc077a54 --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/endpoints_test.go @@ -0,0 +1,622 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" +) + +var _ = Describe("Endpoints Reconciler", func() { + Context("When reconciling controller endpoints", func() { + const ( + namespace = "test-namespace" + controllerName = "test-controller" + ) + + ctx := context.Background() + var reconciler *Reconciler + var owner *corev1.ConfigMap // Use ConfigMap as a simple owner object for testing + + BeforeEach(func() { + reconciler = NewReconciler(k8sClient, k8sClient.Scheme(), cfg) + + // Create the test namespace + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + err := k8sClient.Create(ctx, ns) + if err != nil && !errors.IsAlreadyExists(err) { + Expect(err).NotTo(HaveOccurred()) + } + + // Create an owner object for testing + owner = 
&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: controllerName, + Namespace: namespace, + }, + } + err = k8sClient.Create(ctx, owner) + if err != nil && !errors.IsAlreadyExists(err) { + Expect(err).NotTo(HaveOccurred()) + } + }) + + Context("with ClusterIP service type", func() { + It("should create a ClusterIP service successfully", func() { + endpoint := &operatorv1alpha1.Endpoint{ + Address: "controller", + ClusterIP: &operatorv1alpha1.ClusterIPConfig{ + Enabled: true, + }, + } + + svcPort := corev1.ServicePort{ + Name: "controller", + Port: 9090, + TargetPort: intstr.FromInt(9090), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify the service was created + service := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller", + Namespace: namespace, + }, service) + Expect(err).NotTo(HaveOccurred()) + Expect(service.Spec.Type).To(Equal(corev1.ServiceTypeClusterIP)) + Expect(service.Spec.Selector["app"]).To(Equal("jumpstarter-controller")) + Expect(service.Labels["app"]).To(Equal("jumpstarter-controller")) + }) + }) + + Context("with LoadBalancer service type", func() { + It("should create a LoadBalancer service successfully", func() { + endpoint := &operatorv1alpha1.Endpoint{ + Address: "controller", + LoadBalancer: &operatorv1alpha1.LoadBalancerConfig{ + Enabled: true, + Annotations: map[string]string{"service.beta.kubernetes.io/aws-load-balancer-type": "nlb"}, + Labels: map[string]string{"environment": "production"}, + }, + } + + svcPort := corev1.ServicePort{ + Name: "controller", + Port: 9090, + TargetPort: intstr.FromInt(9090), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify the service was created with -lb suffix + service := &corev1.Service{} + err = k8sClient.Get(ctx, 
types.NamespacedName{ + Name: "controller-lb", + Namespace: namespace, + }, service) + Expect(err).NotTo(HaveOccurred()) + Expect(service.Spec.Type).To(Equal(corev1.ServiceTypeLoadBalancer)) + Expect(service.Annotations["service.beta.kubernetes.io/aws-load-balancer-type"]).To(Equal("nlb")) + Expect(service.Labels["environment"]).To(Equal("production")) + Expect(service.Spec.Selector["app"]).To(Equal("jumpstarter-controller")) + }) + }) + + Context("with NodePort service type", func() { + It("should create a NodePort service successfully", func() { + endpoint := &operatorv1alpha1.Endpoint{ + Address: "controller", + NodePort: &operatorv1alpha1.NodePortConfig{ + Enabled: true, + Port: 30090, + Annotations: map[string]string{"nodeport.kubernetes.io/port": "30090"}, + Labels: map[string]string{"nodeport": "true"}, + }, + } + + svcPort := corev1.ServicePort{ + Name: "controller", + Port: 9090, + TargetPort: intstr.FromInt(9090), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify the service was created with -np suffix + service := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller-np", + Namespace: namespace, + }, service) + Expect(err).NotTo(HaveOccurred()) + Expect(service.Spec.Type).To(Equal(corev1.ServiceTypeNodePort)) + Expect(service.Annotations["nodeport.kubernetes.io/port"]).To(Equal("30090")) + Expect(service.Labels["nodeport"]).To(Equal("true")) + Expect(service.Spec.Selector["app"]).To(Equal("jumpstarter-controller")) + }) + }) + + Context("with multiple service types enabled", func() { + It("should create all enabled service types", func() { + endpoint := &operatorv1alpha1.Endpoint{ + Address: "controller", + LoadBalancer: &operatorv1alpha1.LoadBalancerConfig{ + Enabled: true, + }, + NodePort: &operatorv1alpha1.NodePortConfig{ + Enabled: true, + }, + } + + svcPort := corev1.ServicePort{ + Name: "controller", 
+ Port: 9090, + TargetPort: intstr.FromInt(9090), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify LoadBalancer service was created + lbService := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller-lb", + Namespace: namespace, + }, lbService) + Expect(err).NotTo(HaveOccurred()) + Expect(lbService.Spec.Type).To(Equal(corev1.ServiceTypeLoadBalancer)) + + // Verify NodePort service was created + npService := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller-np", + Namespace: namespace, + }, npService) + Expect(err).NotTo(HaveOccurred()) + Expect(npService.Spec.Type).To(Equal(corev1.ServiceTypeNodePort)) + }) + }) + + Context("when updating an existing service", func() { + It("should update the service when configuration changes", func() { + // Create initial service + endpoint := &operatorv1alpha1.Endpoint{ + Address: "controller", + LoadBalancer: &operatorv1alpha1.LoadBalancerConfig{ + Enabled: true, + Annotations: map[string]string{"initial": "annotation"}, + Labels: map[string]string{"initial": "label"}, + }, + } + + svcPort := corev1.ServicePort{ + Name: "controller", + Port: 9090, + TargetPort: intstr.FromInt(9090), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Update the endpoint configuration + endpoint.LoadBalancer.Annotations["updated"] = "annotation" + endpoint.LoadBalancer.Labels["updated"] = "label" + + err = reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify the service was updated + service := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller-lb", + Namespace: namespace, + }, service) + Expect(err).NotTo(HaveOccurred()) + 
Expect(service.Annotations["updated"]).To(Equal("annotation")) + Expect(service.Labels["updated"]).To(Equal("label")) + }) + }) + + Context("with Ingress enabled", func() { + It("should create a ClusterIP service and Ingress with default nginx annotations", func() { + endpoint := &operatorv1alpha1.Endpoint{ + Address: "grpc.example.com:443", + Ingress: &operatorv1alpha1.IngressConfig{ + Enabled: true, + Class: "nginx", + }, + } + + svcPort := corev1.ServicePort{ + Name: "controller-grpc", + Port: 8082, + TargetPort: intstr.FromInt(8082), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify the ClusterIP service was created (used by ingress) + service := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller-grpc", + Namespace: namespace, + }, service) + Expect(err).NotTo(HaveOccurred()) + Expect(service.Spec.Type).To(Equal(corev1.ServiceTypeClusterIP)) + Expect(service.Spec.Selector["app"]).To(Equal("jumpstarter-controller")) + + // Verify the Ingress was created + ingress := &networkingv1.Ingress{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller-grpc-ing", + Namespace: namespace, + }, ingress) + Expect(err).NotTo(HaveOccurred()) + + // Verify ingress class + Expect(ingress.Spec.IngressClassName).NotTo(BeNil()) + Expect(*ingress.Spec.IngressClassName).To(Equal("nginx")) + + // Verify default nginx annotations for TLS passthrough + Expect(ingress.Annotations["nginx.ingress.kubernetes.io/ssl-redirect"]).To(Equal("true")) + Expect(ingress.Annotations["nginx.ingress.kubernetes.io/backend-protocol"]).To(Equal("GRPC")) + Expect(ingress.Annotations["nginx.ingress.kubernetes.io/proxy-read-timeout"]).To(Equal("300")) + Expect(ingress.Annotations["nginx.ingress.kubernetes.io/proxy-send-timeout"]).To(Equal("300")) + Expect(ingress.Annotations["nginx.ingress.kubernetes.io/ssl-passthrough"]).To(Equal("true")) + + 
// Verify ingress rules + Expect(ingress.Spec.Rules).To(HaveLen(1)) + Expect(ingress.Spec.Rules[0].Host).To(Equal("grpc.example.com")) + Expect(ingress.Spec.Rules[0].HTTP.Paths).To(HaveLen(1)) + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal("controller-grpc")) + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Number).To(Equal(int32(8082))) + + // Verify TLS config + Expect(ingress.Spec.TLS).To(HaveLen(1)) + Expect(ingress.Spec.TLS[0].Hosts).To(ContainElement("grpc.example.com")) + }) + + It("should merge user annotations with defaults (user takes precedence)", func() { + endpoint := &operatorv1alpha1.Endpoint{ + Address: "grpc.example.com", + Ingress: &operatorv1alpha1.IngressConfig{ + Enabled: true, + Class: "nginx", + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/ssl-redirect": "false", // override default + "custom.annotation/key": "custom-value", + }, + Labels: map[string]string{ + "environment": "production", + }, + }, + } + + svcPort := corev1.ServicePort{ + Name: "controller-grpc", + Port: 8082, + TargetPort: intstr.FromInt(8082), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify the Ingress was created + ingress := &networkingv1.Ingress{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller-grpc-ing", + Namespace: namespace, + }, ingress) + Expect(err).NotTo(HaveOccurred()) + + // User annotation should override default + Expect(ingress.Annotations["nginx.ingress.kubernetes.io/ssl-redirect"]).To(Equal("false")) + // Custom annotation should be present + Expect(ingress.Annotations["custom.annotation/key"]).To(Equal("custom-value")) + // Other defaults should still be present + Expect(ingress.Annotations["nginx.ingress.kubernetes.io/backend-protocol"]).To(Equal("GRPC")) + + // User labels should be present + Expect(ingress.Labels["environment"]).To(Equal("production")) 
+ }) + + It("should extract hostname from various address formats", func() { + testCases := []struct { + address string + expectedHost string + }{ + {"grpc.example.com", "grpc.example.com"}, + {"grpc.example.com:443", "grpc.example.com"}, + {"grpc.example.com:8080", "grpc.example.com"}, + } + + for _, tc := range testCases { + endpoint := &operatorv1alpha1.Endpoint{ + Address: tc.address, + Ingress: &operatorv1alpha1.IngressConfig{ + Enabled: true, + }, + } + + svcPort := corev1.ServicePort{ + Name: "test-svc", + Port: 8082, + TargetPort: intstr.FromInt(8082), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify ingress was created with correct hostname + ingress := &networkingv1.Ingress{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "test-svc-ing", + Namespace: namespace, + }, ingress) + Expect(err).NotTo(HaveOccurred()) + Expect(ingress.Spec.Rules[0].Host).To(Equal(tc.expectedHost)) + + // Clean up + _ = k8sClient.Delete(ctx, ingress) + svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: namespace}} + _ = k8sClient.Delete(ctx, svc) + } + }) + + It("should not set ingress class when not specified", func() { + endpoint := &operatorv1alpha1.Endpoint{ + Address: "grpc.example.com", + Ingress: &operatorv1alpha1.IngressConfig{ + Enabled: true, + // Class not specified - will use cluster default + }, + } + + svcPort := corev1.ServicePort{ + Name: "controller-grpc", + Port: 8082, + TargetPort: intstr.FromInt(8082), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileControllerEndpoint(ctx, owner, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify ingress class is nil (will use cluster default IngressClass) + ingress := &networkingv1.Ingress{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller-grpc-ing", + Namespace: namespace, + }, ingress) + 
Expect(err).NotTo(HaveOccurred()) + Expect(ingress.Spec.IngressClassName).To(BeNil()) + }) + }) + + AfterEach(func() { + // Clean up created services + services := []string{"controller", "controller-lb", "controller-np", "controller-grpc", "test-svc"} + for _, svcName := range services { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: namespace, + }, + } + _ = k8sClient.Delete(ctx, service) + } + + // Clean up ingresses + ingresses := []string{"controller-grpc-ing", "test-svc-ing"} + for _, ingName := range ingresses { + ingress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingName, + Namespace: namespace, + }, + } + _ = k8sClient.Delete(ctx, ingress) + } + + // Clean up owner + _ = k8sClient.Delete(ctx, owner) + }) + }) + + Context("When reconciling router replica endpoints", func() { + const ( + namespace = "test-namespace" + routerName = "test-router" + replicaIdx = int32(0) + endpointIdx = 0 + ) + + ctx := context.Background() + var reconciler *Reconciler + var owner *corev1.ConfigMap + + BeforeEach(func() { + reconciler = NewReconciler(k8sClient, k8sClient.Scheme(), cfg) + + // Create the test namespace + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + err := k8sClient.Create(ctx, ns) + if err != nil && !errors.IsAlreadyExists(err) { + Expect(err).NotTo(HaveOccurred()) + } + + // Create an owner object for testing + owner = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: routerName, + Namespace: namespace, + }, + } + err = k8sClient.Create(ctx, owner) + if err != nil && !errors.IsAlreadyExists(err) { + Expect(err).NotTo(HaveOccurred()) + } + }) + + Context("with proper pod selector", func() { + It("should create a service with correct pod selector matching deployment labels", func() { + endpoint := &operatorv1alpha1.Endpoint{ + Address: "router", + NodePort: &operatorv1alpha1.NodePortConfig{ + Enabled: true, + }, + } + + svcPort := 
corev1.ServicePort{ + Name: "router", + Port: 8083, + TargetPort: intstr.FromInt(8083), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileRouterReplicaEndpoint(ctx, owner, replicaIdx, endpointIdx, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify the service was created with correct pod selector + service := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "router-np", + Namespace: namespace, + }, service) + Expect(err).NotTo(HaveOccurred()) + // The selector should be "app: test-router-router-0" (owner.Name + "-router-" + replicaIndex) + Expect(service.Spec.Selector["app"]).To(Equal("test-router-router-0")) + Expect(service.Labels["router"]).To(Equal(routerName)) + Expect(service.Labels["router-index"]).To(Equal("0")) + }) + }) + + Context("with Ingress enabled for router", func() { + It("should create a ClusterIP service and Ingress for router replica", func() { + endpoint := &operatorv1alpha1.Endpoint{ + Address: "router-0.example.com", + Ingress: &operatorv1alpha1.IngressConfig{ + Enabled: true, + Class: "nginx", + Annotations: map[string]string{ + "router.annotation": "value", + }, + }, + } + + svcPort := corev1.ServicePort{ + Name: "router-grpc", + Port: 8083, + TargetPort: intstr.FromInt(8083), + Protocol: corev1.ProtocolTCP, + } + + err := reconciler.ReconcileRouterReplicaEndpoint(ctx, owner, replicaIdx, endpointIdx, endpoint, svcPort) + Expect(err).NotTo(HaveOccurred()) + + // Verify the ClusterIP service was created + service := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "router-grpc", + Namespace: namespace, + }, service) + Expect(err).NotTo(HaveOccurred()) + Expect(service.Spec.Type).To(Equal(corev1.ServiceTypeClusterIP)) + Expect(service.Spec.Selector["app"]).To(Equal("test-router-router-0")) + + // Verify the Ingress was created + ingress := &networkingv1.Ingress{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: "router-grpc-ing", + Namespace: 
namespace, + }, ingress) + Expect(err).NotTo(HaveOccurred()) + + // Verify ingress configuration + Expect(*ingress.Spec.IngressClassName).To(Equal("nginx")) + Expect(ingress.Spec.Rules[0].Host).To(Equal("router-0.example.com")) + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal("router-grpc")) + + // Verify user and default annotations + Expect(ingress.Annotations["router.annotation"]).To(Equal("value")) + Expect(ingress.Annotations["nginx.ingress.kubernetes.io/ssl-passthrough"]).To(Equal("true")) + }) + }) + + AfterEach(func() { + // Clean up created services + services := []string{"router", "router-lb", "router-np", "router-ing", "router-route", "router-grpc"} + for _, svcName := range services { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: namespace, + }, + } + _ = k8sClient.Delete(ctx, service) + } + + // Clean up ingresses + ingresses := []string{"router-grpc-ing"} + for _, ingName := range ingresses { + ingress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingName, + Namespace: namespace, + }, + } + _ = k8sClient.Delete(ctx, ingress) + } + + // Clean up owner + _ = k8sClient.Delete(ctx, owner) + }) + }) + +}) diff --git a/controller/deploy/operator/internal/controller/jumpstarter/endpoints/ingress.go b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/ingress.go new file mode 100644 index 000000000..199fda698 --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/ingress.go @@ -0,0 +1,191 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoints

import (
	"context"
	"errors"
	"strings"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1"
	"github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/utils"
)

// createOrUpdateIngress creates or updates an ingress with proper handling of mutable fields
// and owner references. This follows the same pattern as createOrUpdateService.
// Labels, annotations, ingress class, rules, and TLS are overwritten wholesale
// with the desired state; the owner is set as controller reference so the
// Ingress is garbage-collected with its owner.
func (r *Reconciler) createOrUpdateIngress(ctx context.Context, ingress *networkingv1.Ingress, owner metav1.Object) error {
	log := logf.FromContext(ctx)

	existingIngress := &networkingv1.Ingress{}
	existingIngress.Name = ingress.Name
	existingIngress.Namespace = ingress.Namespace

	op, err := controllerutil.CreateOrUpdate(ctx, r.Client, existingIngress, func() error {
		// Update all mutable fields (same mutate func runs for both the
		// create and update paths of CreateOrUpdate).
		existingIngress.Labels = ingress.Labels
		existingIngress.Annotations = ingress.Annotations
		existingIngress.Spec.IngressClassName = ingress.Spec.IngressClassName
		existingIngress.Spec.Rules = ingress.Spec.Rules
		existingIngress.Spec.TLS = ingress.Spec.TLS

		return controllerutil.SetControllerReference(owner, existingIngress, r.Scheme)
	})

	if err != nil {
		log.Error(err, "Failed to reconcile ingress",
			"name", ingress.Name,
			"namespace", ingress.Namespace)
		return err
	}

	log.Info("Ingress reconciled",
		"name", ingress.Name,
		"namespace", ingress.Namespace,
		"operation", op)

	return nil
}

// extractHostname extracts the hostname from an endpoint address.
// It handles formats like: "hostname", "hostname:port", "IPv4:port", "[IPv6]", "[IPv6]:port"
// NOTE(review): an unbracketed address containing two or more colons (e.g. a
// bare IPv6 address) is returned unchanged, port included if one is present —
// confirm callers never pass unbracketed IPv6-with-port.
func extractHostname(address string) string {
	// Handle IPv6 addresses in brackets
	if strings.HasPrefix(address, "[") {
		// Find the closing bracket
		if idx := strings.Index(address, "]"); idx != -1 {
			return address[1:idx]
		}
		// Malformed (no closing bracket): returned as-is.
		return address
	}

	// For hostname or IPv4, strip port if present
	if idx := strings.LastIndex(address, ":"); idx != -1 {
		// Check if this is part of an IPv6 address (no brackets)
		// Count colons - if more than one, likely IPv6
		if strings.Count(address, ":") > 1 {
			return address
		}
		return address[:idx]
	}

	return address
}

// createIngressForEndpoint creates an ingress for a specific endpoint.
// The ingress points to the ClusterIP service (serviceName with no suffix).
+func (r *Reconciler) createIngressForEndpoint(ctx context.Context, owner metav1.Object, serviceName string, servicePort int32, + endpoint *operatorv1alpha1.Endpoint, baseLabels map[string]string) error { + + log := logf.FromContext(ctx) + + // Check if Ingress API is available in the cluster + if !r.IngressAvailable { + log.Info("Skipping ingress creation: Ingress API not available in cluster") + // TODO: update status of the jumpstarter object to indicate that the ingress is not available + return nil + } + + // Extract hostname from address + hostname := extractHostname(endpoint.Address) + if hostname == "" { + log.Info("Skipping ingress creation: no hostname in endpoint address", + "address", endpoint.Address) + return nil + } + + if errs := validation.IsDNS1123Subdomain(hostname); errs != nil { + log := logf.FromContext(ctx) + log.Error(errors.New(strings.Join(errs, ", ")), "Skipping ingress creation: invalid hostname", + "address", endpoint.Address, + "hostname", hostname) + // TODO: propagate error to status conditions + return nil + } + + // Build default annotations for TLS passthrough with GRPC with nginx ingress + defaultAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/ssl-redirect": "true", + "nginx.ingress.kubernetes.io/backend-protocol": "GRPC", + "nginx.ingress.kubernetes.io/proxy-read-timeout": "300", + "nginx.ingress.kubernetes.io/proxy-send-timeout": "300", + "nginx.ingress.kubernetes.io/ssl-passthrough": "true", + } + + // Merge with user-provided annotations (user annotations take precedence) + annotations := utils.MergeMaps(defaultAnnotations, endpoint.Ingress.Annotations) + + // Merge labels (user labels take precedence) + ingressLabels := utils.MergeMaps(baseLabels, endpoint.Ingress.Labels) + + // Set ingress class name (only if specified, cannot be empty string) + var ingressClassName *string + if endpoint.Ingress.Class != "" { + ingressClassName = &endpoint.Ingress.Class + } + + // Build path type + pathTypePrefix := 
networkingv1.PathTypePrefix + + ingress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName + "-ing", + Namespace: owner.GetNamespace(), + Labels: ingressLabels, + Annotations: annotations, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ingressClassName, + Rules: []networkingv1.IngressRule{ + { + Host: hostname, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/", + PathType: &pathTypePrefix, + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: serviceName, + Port: networkingv1.ServiceBackendPort{ + Number: servicePort, + }, + }, + }, + }, + }, + }, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + { + Hosts: []string{hostname}, + // No SecretName - passthrough mode handles TLS at the backend + }, + }, + }, + } + + return r.createOrUpdateIngress(ctx, ingress, owner) +} diff --git a/controller/deploy/operator/internal/controller/jumpstarter/endpoints/route.go b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/route.go new file mode 100644 index 000000000..fc488ff7e --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/route.go @@ -0,0 +1,147 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package endpoints + +import ( + "context" + "errors" + "strings" + + routev1 "github.com/openshift/api/route/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/utils" +) + +// createOrUpdateRoute creates or updates a route with proper handling of mutable fields +// and owner references. This follows the same pattern as createOrUpdateService and createOrUpdateIngress. +func (r *Reconciler) createOrUpdateRoute(ctx context.Context, route *routev1.Route, owner metav1.Object) error { + log := logf.FromContext(ctx) + + existingRoute := &routev1.Route{} + existingRoute.Name = route.Name + existingRoute.Namespace = route.Namespace + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, existingRoute, func() error { + // Update all mutable fields + existingRoute.Labels = route.Labels + existingRoute.Annotations = route.Annotations + existingRoute.Spec.Host = route.Spec.Host + existingRoute.Spec.Path = route.Spec.Path + existingRoute.Spec.Port = route.Spec.Port + existingRoute.Spec.TLS = route.Spec.TLS + existingRoute.Spec.To = route.Spec.To + existingRoute.Spec.WildcardPolicy = route.Spec.WildcardPolicy + + return controllerutil.SetControllerReference(owner, existingRoute, r.Scheme) + }) + + if err != nil { + log.Error(err, "Failed to reconcile route", + "name", route.Name, + "namespace", route.Namespace) + return err + } + + log.Info("Route reconciled", + "name", route.Name, + "namespace", route.Namespace, + "operation", op) + + return nil +} + +// createRouteForEndpoint creates an OpenShift Route for a specific endpoint. 
+// The route points to the ClusterIP service (serviceName with no suffix).
+func (r *Reconciler) createRouteForEndpoint(ctx context.Context, owner metav1.Object, serviceName string, servicePort int32,
+	endpoint *operatorv1alpha1.Endpoint, baseLabels map[string]string) error {
+
+	log := logf.FromContext(ctx)
+
+	// Check if Route API is available in the cluster
+	if !r.RouteAvailable {
+		log.Info("Skipping route creation: Route API not available in cluster")
+		// TODO: update status of the jumpstarter object to indicate that the route is not available
+		return nil
+	}
+
+	// Extract hostname from address
+	hostname := extractHostname(endpoint.Address)
+	if hostname == "" {
+		log.Info("Skipping route creation: no hostname in endpoint address",
+			"address", endpoint.Address)
+		return nil
+	}
+
+	if errs := validation.IsDNS1123Subdomain(hostname); errs != nil {
+		log := logf.FromContext(ctx)
+		log.Error(errors.New(strings.Join(errs, ", ")), "Skipping route creation: invalid hostname",
+			"address", endpoint.Address,
+			"hostname", hostname)
+		// TODO: propagate error to status conditions
+		return nil
+	}
+
+	// Build default annotations for OpenShift HAProxy router with longer timeouts for gRPC
+	defaultAnnotations := map[string]string{
+		"haproxy.router.openshift.io/timeout":        "2d",
+		"haproxy.router.openshift.io/timeout-tunnel": "2d",
+	}
+
+	// Merge with user-provided annotations (user annotations take precedence)
+	annotations := utils.MergeMaps(defaultAnnotations, endpoint.Route.Annotations)
+
+	// Merge labels (user labels take precedence)
+	routeLabels := utils.MergeMaps(baseLabels, endpoint.Route.Labels)
+
+	// Use passthrough TLS termination (TLS is handled by the backend service)
+	// This is consistent with the Ingress configuration which uses ssl-passthrough
+	tlsTermination := routev1.TLSTerminationPassthrough
+
+	route := &routev1.Route{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        serviceName + "-route",
+			Namespace:   owner.GetNamespace(),
+			Labels: 
routeLabels, + Annotations: annotations, + }, + Spec: routev1.RouteSpec{ + Host: hostname, + Port: &routev1.RoutePort{ + TargetPort: intstr.FromInt(int(servicePort)), + }, + To: routev1.RouteTargetReference{ + Kind: "Service", + Name: serviceName, + Weight: ptr.To(int32(100)), + }, + TLS: &routev1.TLSConfig{ + Termination: tlsTermination, + InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyNone, + }, + WildcardPolicy: routev1.WildcardPolicyNone, + }, + } + + return r.createOrUpdateRoute(ctx, route, owner) +} diff --git a/controller/deploy/operator/internal/controller/jumpstarter/endpoints/suite_test.go b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/suite_test.go new file mode 100644 index 000000000..6af1fab48 --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/endpoints/suite_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "context" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/controller/testutils" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + ctx context.Context + cancel context.CancelFunc + testEnv *envtest.Environment + cfg *rest.Config + k8sClient client.Client +) + +func TestEndpoints(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Endpoints Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + var err error + err = operatorv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + // Retrieve the first found binary directory to allow running tests from IDEs + if binaryDir := testutils.GetFirstFoundEnvTestBinaryDir(6); binaryDir != "" { + testEnv.BinaryAssetsDirectory = binaryDir + } + + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/controller/deploy/operator/internal/controller/jumpstarter/jumpstarter_controller.go b/controller/deploy/operator/internal/controller/jumpstarter/jumpstarter_controller.go new file mode 100644 index 000000000..cf66494b5 --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/jumpstarter_controller.go @@ -0,0 +1,1215 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package jumpstarter + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "net" + "strings" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/yaml" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/controller/jumpstarter/endpoints" + loglevels "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/log" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/config" +) + +const ( + // appProtocolH2C is the application protocol for HTTP/2 Cleartext + appProtocolH2C = "h2c" +) + +// JumpstarterReconciler reconciles a Jumpstarter object +type JumpstarterReconciler struct { + client.Client + Scheme *runtime.Scheme + EndpointReconciler *endpoints.Reconciler +} + +// +kubebuilder:rbac:groups=operator.jumpstarter.dev,resources=jumpstarters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=operator.jumpstarter.dev,resources=jumpstarters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=operator.jumpstarter.dev,resources=jumpstarters/finalizers,verbs=update + +// Core Kubernetes resources +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get;update;patch +// 
+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=services/status,verbs=get;update;patch +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +// RBAC resources +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete + +// Leader election +// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete + +// Networking resources +// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=route.openshift.io,resources=routes/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=route.openshift.io,resources=routes/custom-host,verbs=get;create;update;patch + +// OpenShift cluster config (for baseDomain auto-detection) +// +kubebuilder:rbac:groups=config.openshift.io,resources=ingresses,verbs=get;list;watch + +// Monitoring resources +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete + +// Jumpstarter CRD resources (needed to grant permissions to managed controllers) +// 
+kubebuilder:rbac:groups=jumpstarter.dev,resources=clients,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=clients/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=clients/finalizers,verbs=update +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporters/finalizers,verbs=update +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=leases,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=leases/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=leases/finalizers,verbs=update +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporteraccesspolicies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporteraccesspolicies/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporteraccesspolicies/finalizers,verbs=update + +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile +func (r *JumpstarterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + // Fetch the Jumpstarter instance + var jumpstarter operatorv1alpha1.Jumpstarter + if err := r.Get(ctx, req.NamespacedName, &jumpstarter); err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + log.Info("Jumpstarter resource not found. 
Ignoring since object must be deleted.") + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + log.Error(err, "Failed to get Jumpstarter") + return ctrl.Result{}, err + } + + // Check if the instance is marked to be deleted + if jumpstarter.GetDeletionTimestamp() != nil { + // Handle finalizer logic here if needed + return ctrl.Result{}, nil + } + + // Apply runtime-computed defaults (endpoints based on baseDomain and cluster capabilities) + // Static defaults are handled by kubebuilder annotations in the CRD schema + r.EndpointReconciler.ApplyDefaults(&jumpstarter.Spec, jumpstarter.Namespace) + + // Reconcile RBAC resources first + if err := r.reconcileRBAC(ctx, &jumpstarter); err != nil { + log.Error(err, "Failed to reconcile RBAC") + return ctrl.Result{}, err + } + + // Reconcile Controller Deployment + if err := r.reconcileControllerDeployment(ctx, &jumpstarter); err != nil { + log.Error(err, "Failed to reconcile Controller Deployment") + return ctrl.Result{}, err + } + + // Reconcile Router Deployment + if err := r.reconcileRouterDeployment(ctx, &jumpstarter); err != nil { + log.Error(err, "Failed to reconcile Router Deployment") + return ctrl.Result{}, err + } + + // Reconcile Services + if err := r.reconcileServices(ctx, &jumpstarter); err != nil { + log.Error(err, "Failed to reconcile Services") + return ctrl.Result{}, err + } + + // Reconcile ConfigMaps + if err := r.reconcileConfigMaps(ctx, &jumpstarter); err != nil { + log.Error(err, "Failed to reconcile ConfigMaps") + return ctrl.Result{}, err + } + + // Reconcile Secrets + if err := r.reconcileSecrets(ctx, &jumpstarter); err != nil { + log.Error(err, "Failed to reconcile Secrets") + return ctrl.Result{}, err + } + + // Update status + if err := r.updateStatus(ctx, &jumpstarter); err != nil { + log.Error(err, "Failed to update status") + return ctrl.Result{}, err + } + + // Requeue after 30 minutes to check for changes + return ctrl.Result{RequeueAfter: 30 * 
time.Minute}, nil +} + +// reconcileControllerDeployment reconciles the controller deployment +func (r *JumpstarterReconciler) reconcileControllerDeployment(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter) error { + log := logf.FromContext(ctx) + desiredDeployment := r.createControllerDeployment(jumpstarter) + + existingDeployment := &appsv1.Deployment{} + existingDeployment.Name = desiredDeployment.Name + existingDeployment.Namespace = desiredDeployment.Namespace + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, existingDeployment, func() error { + // Check if this is a new deployment or an existing one + if existingDeployment.CreationTimestamp.IsZero() { + // Deployment is being created, copy all fields from desired + existingDeployment.Labels = desiredDeployment.Labels + existingDeployment.Annotations = desiredDeployment.Annotations + existingDeployment.Spec = desiredDeployment.Spec + return controllerutil.SetControllerReference(jumpstarter, existingDeployment, r.Scheme) + } + + desiredDeployment.Spec.Template.Spec.DeprecatedServiceAccount = existingDeployment.Spec.Template.Spec.DeprecatedServiceAccount + desiredDeployment.Spec.Template.Spec.SchedulerName = existingDeployment.Spec.Template.Spec.SchedulerName + + // Check if deployment needs update using compare function + if !deploymentNeedsUpdate(existingDeployment, desiredDeployment) { + log.V(1).Info("Controller deployment specs are equal, skipping update", + "name", existingDeployment.Name, + "namespace", existingDeployment.Namespace) + return nil + } + + // Deployment exists, generate and log diff + diff, err := generateDiff(existingDeployment, desiredDeployment) + if err != nil { + log.V(1).Info("Failed to generate deployment diff", "error", err) + } else if diff != "" { + fmt.Printf("\n=== Controller deployment differences detected ===\n") + fmt.Printf("Name: %s\n", existingDeployment.Name) + fmt.Printf("Namespace: %s\n", existingDeployment.Namespace) + fmt.Printf("\n%s\n", diff) 
+ fmt.Printf("========================================\n\n") + } + + // Apply changes + existingDeployment.Labels = desiredDeployment.Labels + existingDeployment.Annotations = desiredDeployment.Annotations + existingDeployment.Spec.Replicas = desiredDeployment.Spec.Replicas + existingDeployment.Spec.Selector = desiredDeployment.Spec.Selector + existingDeployment.Spec.Template = desiredDeployment.Spec.Template + return controllerutil.SetControllerReference(jumpstarter, existingDeployment, r.Scheme) + }) + + if err != nil { + log.Error(err, "Failed to reconcile controller deployment", + "name", desiredDeployment.Name, + "namespace", desiredDeployment.Namespace) + return err + } + + log.Info("Controller deployment reconciled", + "name", existingDeployment.Name, + "namespace", existingDeployment.Namespace, + "operation", op) + + return nil +} + +// reconcileRouterDeployment reconciles router deployments (one per replica) +func (r *JumpstarterReconciler) reconcileRouterDeployment(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter) error { + log := logf.FromContext(ctx) + + // Create one deployment per replica + for i := int32(0); i < jumpstarter.Spec.Routers.Replicas; i++ { + desiredDeployment := r.createRouterDeployment(jumpstarter, i) + + existingDeployment := &appsv1.Deployment{} + existingDeployment.Name = desiredDeployment.Name + existingDeployment.Namespace = desiredDeployment.Namespace + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, existingDeployment, func() error { + // Check if this is a new deployment or an existing one + if existingDeployment.CreationTimestamp.IsZero() { + // Deployment is being created, copy all fields from desired + existingDeployment.Labels = desiredDeployment.Labels + existingDeployment.Annotations = desiredDeployment.Annotations + existingDeployment.Spec = desiredDeployment.Spec + return controllerutil.SetControllerReference(jumpstarter, existingDeployment, r.Scheme) + } + 
desiredDeployment.Spec.Template.Spec.SchedulerName = existingDeployment.Spec.Template.Spec.SchedulerName + desiredDeployment.Spec.Template.Spec.DeprecatedServiceAccount = existingDeployment.Spec.Template.Spec.DeprecatedServiceAccount + + if !deploymentNeedsUpdate(existingDeployment, desiredDeployment) { + log.V(1).Info("Router deployment specs are equal, skipping update", + "name", existingDeployment.Name, + "namespace", existingDeployment.Namespace, + "replica", i) + return nil + } + // Deployment exists, generate and log diff + diff, err := generateDiff(existingDeployment, desiredDeployment) + if err != nil { + log.V(1).Info("Failed to generate deployment diff", "error", err) + } else if diff != "" { + fmt.Printf("\n=== Router deployment differences detected ===\n") + fmt.Printf("Name: %s\n", existingDeployment.Name) + fmt.Printf("Namespace: %s\n", existingDeployment.Namespace) + fmt.Printf("Replica: %d\n", i) + fmt.Printf("\n%s\n", diff) + fmt.Printf("==============================================\n\n") + } + + // Apply changes + existingDeployment.Labels = desiredDeployment.Labels + existingDeployment.Annotations = desiredDeployment.Annotations + existingDeployment.Spec.Replicas = desiredDeployment.Spec.Replicas + existingDeployment.Spec.Selector = desiredDeployment.Spec.Selector + existingDeployment.Spec.Template = desiredDeployment.Spec.Template + return controllerutil.SetControllerReference(jumpstarter, existingDeployment, r.Scheme) + }) + + if err != nil { + log.Error(err, "Failed to reconcile router deployment", + "name", desiredDeployment.Name, + "namespace", desiredDeployment.Namespace, + "replica", i) + return err + } + + log.Info("Router deployment reconciled", + "name", existingDeployment.Name, + "namespace", existingDeployment.Namespace, + "replica", i, + "operation", op) + } + + // Clean up deployments for scaled-down replicas + if err := r.cleanupExcessRouterDeployments(ctx, jumpstarter); err != nil { + log.Error(err, "Failed to cleanup excess 
router deployments") + return err + } + + return nil +} + +// reconcileServices reconciles all services +func (r *JumpstarterReconciler) reconcileServices(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter) error { + log := logf.FromContext(ctx) + + // Reconcile controller services + for _, endpoint := range jumpstarter.Spec.Controller.GRPC.Endpoints { + appProtocol := appProtocolH2C + svcPort := corev1.ServicePort{ + Name: "controller-grpc", + Port: 8082, + TargetPort: intstr.FromInt(8082), + Protocol: corev1.ProtocolTCP, + AppProtocol: &appProtocol, + } + // Set NodePort if configured + if endpoint.NodePort != nil && endpoint.NodePort.Enabled && endpoint.NodePort.Port > 0 { + svcPort.NodePort = endpoint.NodePort.Port + } + if err := r.EndpointReconciler.ReconcileControllerEndpoint(ctx, jumpstarter, &endpoint, svcPort); err != nil { + return err + } + } + + // Reconcile router services - one per replica, all endpoints per replica + for i := int32(0); i < jumpstarter.Spec.Routers.Replicas; i++ { + if len(jumpstarter.Spec.Routers.GRPC.Endpoints) > 0 { + // Each replica gets ALL configured endpoints with replica substitution + for endpointIdx, baseEndpoint := range jumpstarter.Spec.Routers.GRPC.Endpoints { + endpoint := r.buildEndpointForReplica(jumpstarter, i, endpointIdx, &baseEndpoint) + + // Build unique service name for this replica AND endpoint + // This allows multiple service types (NodePort, LoadBalancer, etc.) 
per replica + serviceName := r.buildServiceNameForReplicaEndpoint(jumpstarter, i, endpointIdx) + + appProtocol := appProtocolH2C + svcPort := corev1.ServicePort{ + Name: serviceName, // Unique name per replica+endpoint + Port: 8083, + TargetPort: intstr.FromInt(8083), + Protocol: corev1.ProtocolTCP, + AppProtocol: &appProtocol, + } + // Set NodePort if configured + if endpoint.NodePort != nil && endpoint.NodePort.Enabled && endpoint.NodePort.Port > 0 { + // increase nodeport numbers based in replica, not perfect because it needs to be + // consecutive, but this is mostly for E2E testing. + svcPort.NodePort = endpoint.NodePort.Port + i + } + if err := r.EndpointReconciler.ReconcileRouterReplicaEndpoint(ctx, jumpstarter, i, endpointIdx, &endpoint, svcPort); err != nil { + return err + } + } + } else { + // No endpoints configured, create a default service without ingress/route + endpoint := operatorv1alpha1.Endpoint{ + Address: fmt.Sprintf("router-%d.%s", i, jumpstarter.Spec.BaseDomain), + } + + serviceName := fmt.Sprintf("%s-router-%d", jumpstarter.Name, i) + appProtocol := appProtocolH2C + svcPort := corev1.ServicePort{ + Name: serviceName, + Port: 8083, + TargetPort: intstr.FromInt(8083), + Protocol: corev1.ProtocolTCP, + AppProtocol: &appProtocol, + } + if err := r.EndpointReconciler.ReconcileRouterReplicaEndpoint(ctx, jumpstarter, i, 0, &endpoint, svcPort); err != nil { + return err + } + } + } + + // Clean up services for scaled-down replicas + if err := r.cleanupExcessRouterServices(ctx, jumpstarter); err != nil { + log.Error(err, "Failed to cleanup excess router services") + return err + } + + return nil +} + +// reconcileConfigMaps reconciles all configmaps +func (r *JumpstarterReconciler) reconcileConfigMaps(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter) error { + log := logf.FromContext(ctx) + desiredConfigMap, err := r.createConfigMap(jumpstarter) + if err != nil { + return fmt.Errorf("failed to create configmap: %w", err) + } + + 
existingConfigMap := &corev1.ConfigMap{} + existingConfigMap.Name = desiredConfigMap.Name + existingConfigMap.Namespace = desiredConfigMap.Namespace + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, existingConfigMap, func() error { + // Check if this is a new configmap or an existing one + if existingConfigMap.CreationTimestamp.IsZero() { + // ConfigMap is being created, copy all fields from desired + existingConfigMap.Labels = desiredConfigMap.Labels + existingConfigMap.Annotations = desiredConfigMap.Annotations + existingConfigMap.Data = desiredConfigMap.Data + existingConfigMap.BinaryData = desiredConfigMap.BinaryData + return controllerutil.SetControllerReference(jumpstarter, existingConfigMap, r.Scheme) + } + + // ConfigMap exists, check if update is needed + if !configMapNeedsUpdate(existingConfigMap, desiredConfigMap, log) { + log.V(1).Info("ConfigMap is up to date, skipping update", + "name", existingConfigMap.Name, + "namespace", existingConfigMap.Namespace) + return nil + } + + // Update needed - apply changes + existingConfigMap.Labels = desiredConfigMap.Labels + existingConfigMap.Annotations = desiredConfigMap.Annotations + existingConfigMap.Data = desiredConfigMap.Data + existingConfigMap.BinaryData = desiredConfigMap.BinaryData + return controllerutil.SetControllerReference(jumpstarter, existingConfigMap, r.Scheme) + }) + + if err != nil { + log.Error(err, "Failed to reconcile configmap", + "name", desiredConfigMap.Name, + "namespace", desiredConfigMap.Namespace) + return err + } + + log.Info("ConfigMap reconciled", + "name", existingConfigMap.Name, + "namespace", existingConfigMap.Namespace, + "operation", op) + + return nil +} + +// reconcileSecrets reconciles all secrets +// Secrets are only created if they don't exist. They are not updated or deleted +// to preserve secret keys across CR updates and deletions. 
+func (r *JumpstarterReconciler) reconcileSecrets(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter) error { + log := logf.FromContext(ctx) + + // Create controller secret if it doesn't exist + // Use fixed name to match Helm chart for migration compatibility + controllerSecretName := "jumpstarter-controller-secret" + if err := r.ensureSecretExists(ctx, jumpstarter, controllerSecretName); err != nil { + log.Error(err, "Failed to ensure controller secret exists", "secret", controllerSecretName) + return err + } + + // Create router secret if it doesn't exist + // Use fixed name to match Helm chart for migration compatibility + routerSecretName := "jumpstarter-router-secret" + if err := r.ensureSecretExists(ctx, jumpstarter, routerSecretName); err != nil { + log.Error(err, "Failed to ensure router secret exists", "secret", routerSecretName) + return err + } + + return nil +} + +// ensureSecretExists creates a secret only if it doesn't already exist +func (r *JumpstarterReconciler) ensureSecretExists(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter, name string) error { + log := logf.FromContext(ctx) + + // Check if secret already exists + existingSecret := &corev1.Secret{} + err := r.Get(ctx, client.ObjectKey{ + Namespace: jumpstarter.Namespace, + Name: name, + }, existingSecret) + + if err == nil { + // Secret already exists, don't update it + log.V(loglevels.LevelTrace).Info("Secret already exists, skipping creation", "secret", name) + return nil + } + + if !errors.IsNotFound(err) { + // Some other error occurred + return err + } + + // Secret doesn't exist, create it with a random key + randomKey, err := generateRandomKey(32) + if err != nil { + return fmt.Errorf("failed to generate random key: %w", err) + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: jumpstarter.Namespace, + Labels: map[string]string{ + "app": jumpstarter.Name, + "app.kubernetes.io/managed-by": "jumpstarter-operator", + }, + 
Annotations: map[string]string{ + "jumpstarter.dev/orphan": "true", + }, + }, + StringData: map[string]string{ + "key": randomKey, + }, + } + + // Note: We intentionally do NOT set owner reference here so that + // secrets are not deleted when the Jumpstarter CR is deleted. + // This preserves the secret keys across CR deletions and recreations. + + if err := r.Create(ctx, secret); err != nil { + // Handle race condition where secret was created between Get and Create + if errors.IsAlreadyExists(err) { + log.V(loglevels.LevelDebug).Info("Secret was created by another reconciliation", "secret", name) + return nil + } + return fmt.Errorf("failed to create secret: %w", err) + } + + log.Info("Created new secret with random key", "secret", name) + return nil +} + +// generateRandomKey generates a cryptographically secure random key +func generateRandomKey(length int) (string, error) { + bytes := make([]byte, length) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(bytes), nil +} + +// updateStatus updates the status of the Jumpstarter resource +func (r *JumpstarterReconciler) updateStatus(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter) error { + // Update status fields based on current state + // This is a placeholder - actual implementation would check deployment status, etc. 
+ // TODO: Add status fields to JumpstarterStatus in the API types + + return nil +} + +// createControllerDeployment creates a deployment for the controller +func (r *JumpstarterReconciler) createControllerDeployment(jumpstarter *operatorv1alpha1.Jumpstarter) *appsv1.Deployment { + labels := map[string]string{ + "component": "controller", + "app": "jumpstarter-controller", + "controller": jumpstarter.Name, + } + + // Build GRPC endpoint from first controller endpoint + // Default to port 443 for TLS gRPC endpoints + grpcEndpoint := "" + if len(jumpstarter.Spec.Controller.GRPC.Endpoints) > 0 { + ep := jumpstarter.Spec.Controller.GRPC.Endpoints[0] + if ep.Address != "" { + grpcEndpoint = ensurePort(ep.Address, "443") + } else { + grpcEndpoint = fmt.Sprintf("grpc.%s:443", jumpstarter.Spec.BaseDomain) + } + } + + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-controller", jumpstarter.Name), + Namespace: jumpstarter.Namespace, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &jumpstarter.Spec.Controller.Replicas, + ProgressDeadlineSeconds: ptr.To(int32(600)), + RevisionHistoryLimit: ptr.To(int32(10)), + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + RollingUpdate: &appsv1.RollingUpdateDeployment{ + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "25%"}, + MaxUnavailable: &intstr.IntOrString{Type: intstr.String, StrVal: "25%"}, + }, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyAlways, + DNSPolicy: corev1.DNSClusterFirst, + TerminationGracePeriodSeconds: ptr.To(int64(30)), + Containers: []corev1.Container{ + { + Name: "manager", + Image: jumpstarter.Spec.Controller.Image, + ImagePullPolicy: jumpstarter.Spec.Controller.ImagePullPolicy, + Args: []string{ + "--leader-elect", + 
"--health-probe-bind-address=:8081", + "-metrics-bind-address=:8080", + }, + Env: []corev1.EnvVar{ + { + Name: "GRPC_ENDPOINT", + Value: grpcEndpoint, + }, + { + Name: "CONTROLLER_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "jumpstarter-controller-secret", + }, + Key: "key", + }, + }, + }, + { + Name: "ROUTER_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "jumpstarter-router-secret", + }, + Key: "key", + }, + }, + }, + { + Name: "NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + APIVersion: "v1", + }, + }, + }, + { + Name: "GIN_MODE", + Value: "release", + }, + }, + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8082, + Name: "grpc", + Protocol: corev1.ProtocolTCP, + }, + { + ContainerPort: 8080, + Name: "metrics", + Protocol: corev1.ProtocolTCP, + }, + { + ContainerPort: 8081, + Name: "health", + Protocol: corev1.ProtocolTCP, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(8081), + Scheme: corev1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 15, + PeriodSeconds: 20, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/readyz", + Port: intstr.FromInt(8081), + Scheme: corev1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 5, + PeriodSeconds: 10, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + Resources: jumpstarter.Spec.Controller.Resources, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: 
boolPtr(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: boolPtr(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + ServiceAccountName: fmt.Sprintf("%s-controller-manager", jumpstarter.Name), + }, + }, + }, + } +} + +func boolPtr(b bool) *bool { + return &b +} + +// createRouterDeployment creates a deployment for a specific router replica +func (r *JumpstarterReconciler) createRouterDeployment(jumpstarter *operatorv1alpha1.Jumpstarter, replicaIndex int32) *appsv1.Deployment { + // Base app label that ALL services for this replica will select + // Individual services will be named with endpoint suffixes, but all select the same pods + baseAppLabel := fmt.Sprintf("%s-router-%d", jumpstarter.Name, replicaIndex) + + labels := map[string]string{ + "component": "router", + "app": baseAppLabel, // All services for this replica select by this label + "router": jumpstarter.Name, + "router-index": fmt.Sprintf("%d", replicaIndex), + } + + // Build router endpoint for this specific replica + routerEndpoint := r.buildRouterEndpointForReplica(jumpstarter, replicaIndex) + + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-router-%d", jumpstarter.Name, replicaIndex), + Namespace: jumpstarter.Namespace, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(1)), // Each deployment for the router needs to have exactly 1 replica + ProgressDeadlineSeconds: ptr.To(int32(600)), + RevisionHistoryLimit: ptr.To(int32(10)), + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + RollingUpdate: &appsv1.RollingUpdateDeployment{ + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "25%"}, + MaxUnavailable: &intstr.IntOrString{Type: intstr.String, StrVal: "25%"}, + }, + }, + Selector: &metav1.LabelSelector{ + 
MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyAlways, + DNSPolicy: corev1.DNSClusterFirst, + TerminationGracePeriodSeconds: ptr.To(int64(30)), + Containers: []corev1.Container{ + { + Name: "router", + Image: jumpstarter.Spec.Routers.Image, + ImagePullPolicy: jumpstarter.Spec.Routers.ImagePullPolicy, + Command: []string{"/router"}, + Env: []corev1.EnvVar{ + { + Name: "GRPC_ROUTER_ENDPOINT", + Value: routerEndpoint, + }, + { + Name: "ROUTER_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "jumpstarter-router-secret", + }, + Key: "key", + }, + }, + }, + { + Name: "NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + APIVersion: "v1", + }, + }, + }, + }, + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8083, + Name: "grpc", + Protocol: corev1.ProtocolTCP, + }, + { + ContainerPort: 8080, + Name: "metrics", + Protocol: corev1.ProtocolTCP, + }, + { + ContainerPort: 8081, + Name: "health", + Protocol: corev1.ProtocolTCP, + }, + }, + Resources: jumpstarter.Spec.Routers.Resources, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: boolPtr(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: boolPtr(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + ServiceAccountName: fmt.Sprintf("%s-controller-manager", jumpstarter.Name), + TopologySpreadConstraints: jumpstarter.Spec.Routers.TopologySpreadConstraints, + }, + }, + }, + } +} + +// createConfigMap creates a configmap for 
jumpstarter configuration +func (r *JumpstarterReconciler) createConfigMap(jumpstarter *operatorv1alpha1.Jumpstarter) (*corev1.ConfigMap, error) { + // Build config struct from spec + cfg := r.buildConfig(jumpstarter) + + // Marshal to YAML + configYAML, err := yaml.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("failed to marshal config to YAML: %w", err) + } + + // Build router configuration for all replicas + router := r.buildRouter(jumpstarter) + + // Marshal router to YAML + routerYAML, err := yaml.Marshal(router) + if err != nil { + return nil, fmt.Errorf("failed to marshal router to YAML: %w", err) + } + + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "jumpstarter-controller", + Namespace: jumpstarter.Namespace, + Labels: map[string]string{ + "app": "jumpstarter-controller", + "control-plane": "controller-manager", + }, + }, + Data: map[string]string{ + "config": string(configYAML), + "router": string(routerYAML), + }, + }, nil +} + +// buildConfig builds the controller configuration struct from the CR spec +func (r *JumpstarterReconciler) buildConfig(jumpstarter *operatorv1alpha1.Jumpstarter) config.Config { + cfg := config.Config{ + Provisioning: config.Provisioning{ + Enabled: false, + }, + Grpc: config.Grpc{ + Keepalive: config.Keepalive{ + MinTime: "1s", + PermitWithoutStream: true, + }, + }, + } + + // Authentication configuration + auth := config.Authentication{ + JWT: jumpstarter.Spec.Controller.Authentication.JWT, + } + + // Internal authentication + if jumpstarter.Spec.Controller.Authentication.Internal.Enabled { + prefix := jumpstarter.Spec.Controller.Authentication.Internal.Prefix + if prefix == "" { + prefix = "internal:" + } + auth.Internal.Prefix = prefix + + if jumpstarter.Spec.Controller.Authentication.Internal.TokenLifetime != nil { + auth.Internal.TokenLifetime = jumpstarter.Spec.Controller.Authentication.Internal.TokenLifetime.Duration.String() + } + } + + // Kubernetes authentication + if 
jumpstarter.Spec.Controller.Authentication.K8s.Enabled { + auth.K8s.Enabled = true + } + + // Ensure JWT is an empty array, not null + if auth.JWT == nil { + auth.JWT = []apiserverv1beta1.JWTAuthenticator{} + } + + cfg.Authentication = auth + + // gRPC keepalive configuration + if jumpstarter.Spec.Controller.GRPC.Keepalive != nil { + ka := &cfg.Grpc.Keepalive + + if jumpstarter.Spec.Controller.GRPC.Keepalive.MinTime != nil { + ka.MinTime = jumpstarter.Spec.Controller.GRPC.Keepalive.MinTime.Duration.String() + } + + ka.PermitWithoutStream = jumpstarter.Spec.Controller.GRPC.Keepalive.PermitWithoutStream + + if jumpstarter.Spec.Controller.GRPC.Keepalive.Timeout != nil { + ka.Timeout = jumpstarter.Spec.Controller.GRPC.Keepalive.Timeout.Duration.String() + } + + if jumpstarter.Spec.Controller.GRPC.Keepalive.IntervalTime != nil { + ka.IntervalTime = jumpstarter.Spec.Controller.GRPC.Keepalive.IntervalTime.Duration.String() + } + + if jumpstarter.Spec.Controller.GRPC.Keepalive.MaxConnectionIdle != nil { + ka.MaxConnectionIdle = jumpstarter.Spec.Controller.GRPC.Keepalive.MaxConnectionIdle.Duration.String() + } + + if jumpstarter.Spec.Controller.GRPC.Keepalive.MaxConnectionAge != nil { + ka.MaxConnectionAge = jumpstarter.Spec.Controller.GRPC.Keepalive.MaxConnectionAge.Duration.String() + } + + if jumpstarter.Spec.Controller.GRPC.Keepalive.MaxConnectionAgeGrace != nil { + ka.MaxConnectionAgeGrace = jumpstarter.Spec.Controller.GRPC.Keepalive.MaxConnectionAgeGrace.Duration.String() + } + } + + return cfg +} + +// buildRouter builds the router configuration with entries for all replicas +func (r *JumpstarterReconciler) buildRouter(jumpstarter *operatorv1alpha1.Jumpstarter) config.Router { + router := make(config.Router) + + // Create router entry for each replica + for i := int32(0); i < jumpstarter.Spec.Routers.Replicas; i++ { + // First replica is named "default" for backwards compatibility + routerName := "default" + if i > 0 { + routerName = fmt.Sprintf("router-%d", i) + } + 
+ entry := config.RouterEntry{ + Endpoint: r.buildRouterEndpointForReplica(jumpstarter, i), + } + + // Add labels if this is not the default router (replica 0) + // Additional routers get labels to distinguish them + if i > 0 { + entry.Labels = map[string]string{ + "router-index": fmt.Sprintf("%d", i), + } + } + + router[routerName] = entry + } + + return router +} + +// buildRouterEndpointForReplica builds the GRPC_ROUTER_ENDPOINT for a specific replica +// This is the primary endpoint the router advertises itself as +func (r *JumpstarterReconciler) buildRouterEndpointForReplica(jumpstarter *operatorv1alpha1.Jumpstarter, replicaIndex int32) string { + // If endpoints are specified, use the first one as the primary endpoint + if len(jumpstarter.Spec.Routers.GRPC.Endpoints) > 0 { + ep := jumpstarter.Spec.Routers.GRPC.Endpoints[0] + address := ep.Address + if address != "" { + address = r.substituteReplica(address, replicaIndex) + return ensurePort(address, "443") + } + } + // Default pattern: router-N.baseDomain + return fmt.Sprintf("router-%d.%s:443", replicaIndex, jumpstarter.Spec.BaseDomain) +} + +// substituteReplica replaces $(replica) placeholder with actual replica index +func (r *JumpstarterReconciler) substituteReplica(address string, replicaIndex int32) string { + return strings.ReplaceAll(address, "$(replica)", fmt.Sprintf("%d", replicaIndex)) +} + +// ensurePort adds a default port to an address if it doesn't already have one +// Handles IPv4, IPv6, and hostnames correctly using net.SplitHostPort +func ensurePort(address, defaultPort string) string { + // Try to split the address into host and port + _, _, err := net.SplitHostPort(address) + if err == nil { + // Address already has a port, return as-is + return address + } + + // No port found, need to add one + // net.JoinHostPort handles IPv6 addresses correctly (adds brackets if needed) + return net.JoinHostPort(address, defaultPort) +} + +// buildServiceNameForReplicaEndpoint creates a unique service 
name for a router replica and endpoint +func (r *JumpstarterReconciler) buildServiceNameForReplicaEndpoint(jumpstarter *operatorv1alpha1.Jumpstarter, replicaIndex int32, endpointIdx int) string { + if endpointIdx == 0 { + // First endpoint uses base name for backwards compatibility + return fmt.Sprintf("%s-router-%d", jumpstarter.Name, replicaIndex) + } + // Additional endpoints get a suffix + return fmt.Sprintf("%s-router-%d-%d", jumpstarter.Name, replicaIndex, endpointIdx) +} + +// buildEndpointForReplica creates an Endpoint struct for a specific router replica and endpoint +func (r *JumpstarterReconciler) buildEndpointForReplica(jumpstarter *operatorv1alpha1.Jumpstarter, replicaIndex int32, endpointIdx int, baseEndpoint *operatorv1alpha1.Endpoint) operatorv1alpha1.Endpoint { + // Copy the base endpoint + endpoint := *baseEndpoint + + // Set or substitute address + if endpoint.Address != "" { + endpoint.Address = r.substituteReplica(endpoint.Address, replicaIndex) + } else { + // Default address pattern when none specified + if endpointIdx == 0 { + endpoint.Address = fmt.Sprintf("router-%d.%s", replicaIndex, jumpstarter.Spec.BaseDomain) + } else { + endpoint.Address = fmt.Sprintf("router-%d-%d.%s", replicaIndex, endpointIdx, jumpstarter.Spec.BaseDomain) + } + } + + return endpoint +} + +// cleanupExcessRouterDeployments deletes router deployments that exceed the current replica count +func (r *JumpstarterReconciler) cleanupExcessRouterDeployments(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter) error { + log := logf.FromContext(ctx) + + // List all deployments with our router label + deploymentList := &appsv1.DeploymentList{} + listOpts := []client.ListOption{ + client.InNamespace(jumpstarter.Namespace), + client.MatchingLabels{ + "router": jumpstarter.Name, + }, + } + + if err := r.List(ctx, deploymentList, listOpts...); err != nil { + return fmt.Errorf("failed to list router deployments: %w", err) + } + + // Delete deployments with replica index 
>= current replica count + for i := range deploymentList.Items { + deployment := &deploymentList.Items[i] + + // Check if this deployment's name indicates it's beyond the current replica count + // We need to check all indices from current replicas onwards + for idx := jumpstarter.Spec.Routers.Replicas; idx < 100; idx++ { // reasonable upper bound + excessName := fmt.Sprintf("%s-router-%d", jumpstarter.Name, idx) + if deployment.Name == excessName { + log.Info("Deleting excess router deployment", "deployment", deployment.Name, "replicaIndex", idx) + if err := r.Delete(ctx, deployment); err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete excess deployment %s: %w", deployment.Name, err) + } + } + break + } + } + } + + return nil +} + +// cleanupExcessRouterServices deletes router services that exceed the current replica count +// or endpoint count. This ensures that when replicas or endpoints are scaled down, the +// corresponding services are removed. +func (r *JumpstarterReconciler) cleanupExcessRouterServices(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter) error { + log := logf.FromContext(ctx) + + // Services can have suffixes for different service types + // ClusterIP has no suffix, LoadBalancer has "-lb", NodePort has "-np" + suffixes := []string{"", "-lb", "-np"} + + // 1. 
Delete services for excess replicas (replica index >= current replica count) + for idx := jumpstarter.Spec.Routers.Replicas; idx < 100; idx++ { // reasonable upper bound + foundAny := false + + // Try to delete services for all endpoints and service types for this replica + for endpointIdx := 0; endpointIdx < 10; endpointIdx++ { // reasonable upper bound for endpoints + for _, suffix := range suffixes { + var serviceName string + if endpointIdx == 0 { + serviceName = fmt.Sprintf("%s-router-%d%s", jumpstarter.Name, idx, suffix) + } else { + serviceName = fmt.Sprintf("%s-router-%d-%d%s", jumpstarter.Name, idx, endpointIdx, suffix) + } + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: jumpstarter.Namespace, + }, + } + + err := r.Delete(ctx, service) + if err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete excess service %s: %w", serviceName, err) + } + } else { + foundAny = true + log.Info("Deleted excess router service", "service", serviceName, "replicaIndex", idx, "endpointIdx", endpointIdx) + } + } + } + + // If we didn't find any services for this replica index, we've gone past all excess services + if !foundAny { + break + } + } + + // 2. 
Delete services for excess endpoints within valid replicas + numEndpoints := len(jumpstarter.Spec.Routers.GRPC.Endpoints) + if numEndpoints == 0 { + numEndpoints = 1 // default endpoint + } + + for replicaIdx := int32(0); replicaIdx < jumpstarter.Spec.Routers.Replicas; replicaIdx++ { + for endpointIdx := numEndpoints; endpointIdx < 10; endpointIdx++ { // reasonable upper bound + foundAny := false + + for _, suffix := range suffixes { + var serviceName string + if endpointIdx == 0 { + serviceName = fmt.Sprintf("%s-router-%d%s", jumpstarter.Name, replicaIdx, suffix) + } else { + serviceName = fmt.Sprintf("%s-router-%d-%d%s", jumpstarter.Name, replicaIdx, endpointIdx, suffix) + } + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: jumpstarter.Namespace, + }, + } + + err := r.Delete(ctx, service) + if err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete excess endpoint service %s: %w", serviceName, err) + } + } else { + foundAny = true + log.Info("Deleted excess endpoint service", "service", serviceName, "replicaIndex", replicaIdx, "endpointIdx", endpointIdx) + } + } + + // If we didn't find any services for this endpoint index, we've gone past all excess endpoints + if !foundAny { + break + } + } + } + + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *JumpstarterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&operatorv1alpha1.Jumpstarter{}). + Named("jumpstarter"). + Owns(&appsv1.Deployment{}). + Owns(&corev1.Service{}). + Owns(&corev1.ConfigMap{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). 
+ // Note: Secrets and ServiceAccounts are intentionally NOT owned to prevent deletion + Complete(r) +} diff --git a/controller/deploy/operator/internal/controller/jumpstarter/jumpstarter_controller_test.go b/controller/deploy/operator/internal/controller/jumpstarter/jumpstarter_controller_test.go new file mode 100644 index 000000000..08799d371 --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/jumpstarter_controller_test.go @@ -0,0 +1,142 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jumpstarter + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/controller/jumpstarter/endpoints" +) + +var _ = Describe("Jumpstarter Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + jumpstarter := &operatorv1alpha1.Jumpstarter{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Jumpstarter") + err := k8sClient.Get(ctx, typeNamespacedName, jumpstarter) + if err != nil && errors.IsNotFound(err) { + resource := &operatorv1alpha1.Jumpstarter{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: operatorv1alpha1.JumpstarterSpec{ + BaseDomain: "example.com", + UseCertManager: true, + Controller: operatorv1alpha1.ControllerConfig{ + Image: "quay.io/jumpstarter/jumpstarter:latest", + ImagePullPolicy: "IfNotPresent", + Replicas: 1, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + GRPC: operatorv1alpha1.GRPCConfig{ + Endpoints: []operatorv1alpha1.Endpoint{ + { + Address: "controller", + }, + }, + }, + }, + Routers: operatorv1alpha1.RoutersConfig{ + Image: "quay.io/jumpstarter/jumpstarter:latest", + ImagePullPolicy: "IfNotPresent", + Replicas: 1, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + 
corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + GRPC: operatorv1alpha1.GRPCConfig{ + Endpoints: []operatorv1alpha1.Endpoint{ + { + Address: "router", + }, + }, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &operatorv1alpha1.Jumpstarter{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Jumpstarter") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &JumpstarterReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + EndpointReconciler: endpoints.NewReconciler(k8sClient, k8sClient.Scheme(), cfg), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ }) + }) +}) + +var _ = Describe("ensurePort", func() { + DescribeTable("should handle addresses correctly", + func(address, defaultPort, expected string) { + result := ensurePort(address, defaultPort) + Expect(result).To(Equal(expected)) + }, + Entry("hostname without port", "example.com", "443", "example.com:443"), + Entry("hostname with port", "example.com:8083", "443", "example.com:8083"), + Entry("IPv6 without port", "2001:db8::1", "443", "[2001:db8::1]:443"), + Entry("IPv6 with port", "[2001:db8::1]:8083", "443", "[2001:db8::1]:8083"), + Entry("malformed - too many colons", "host:port:extra", "443", "[host:port:extra]:443"), + Entry("malformed - empty string", "", "443", ":443"), + Entry("malformed - just colon", ":", "443", ":"), + ) +}) diff --git a/controller/deploy/operator/internal/controller/jumpstarter/rbac.go b/controller/deploy/operator/internal/controller/jumpstarter/rbac.go new file mode 100644 index 000000000..e03336181 --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/rbac.go @@ -0,0 +1,249 @@ +package jumpstarter + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" +) + +// reconcileRBAC reconciles all RBAC resources (ServiceAccount, Role, RoleBinding) +func (r *JumpstarterReconciler) reconcileRBAC(ctx context.Context, jumpstarter *operatorv1alpha1.Jumpstarter) error { + log := logf.FromContext(ctx) + + // ServiceAccount + // Note: We intentionally do NOT set controller reference on ServiceAccount to prevent + // it from being garbage collected when the Jumpstarter CR is deleted + desiredSA := r.createServiceAccount(jumpstarter) + + existingSA := &corev1.ServiceAccount{} + existingSA.Name = desiredSA.Name + 
existingSA.Namespace = desiredSA.Namespace + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, existingSA, func() error { + // Check if this is a new service account or an existing one + if existingSA.CreationTimestamp.IsZero() { + // ServiceAccount is being created, copy all fields from desired + existingSA.Labels = desiredSA.Labels + existingSA.Annotations = desiredSA.Annotations + return nil + } + + // ServiceAccount exists, check if update is needed + if !serviceAccountNeedsUpdate(existingSA, desiredSA) { + log.V(1).Info("ServiceAccount is up to date, skipping update", + "name", existingSA.Name, + "namespace", existingSA.Namespace) + return nil + } + + // Update needed - apply changes + existingSA.Labels = desiredSA.Labels + existingSA.Annotations = desiredSA.Annotations + return nil + }) + + if err != nil { + log.Error(err, "Failed to reconcile ServiceAccount", + "name", desiredSA.Name, + "namespace", desiredSA.Namespace) + return err + } + + log.Info("ServiceAccount reconciled", + "name", existingSA.Name, + "namespace", existingSA.Namespace, + "operation", op) + + // Role + desiredRole := r.createRole(jumpstarter) + + existingRole := &rbacv1.Role{} + existingRole.Name = desiredRole.Name + existingRole.Namespace = desiredRole.Namespace + + op, err = controllerutil.CreateOrUpdate(ctx, r.Client, existingRole, func() error { + // Check if this is a new role or an existing one + if existingRole.CreationTimestamp.IsZero() { + // Role is being created, copy all fields from desired + existingRole.Labels = desiredRole.Labels + existingRole.Annotations = desiredRole.Annotations + existingRole.Rules = desiredRole.Rules + return controllerutil.SetControllerReference(jumpstarter, existingRole, r.Scheme) + } + + // Role exists, check if update is needed + if !roleNeedsUpdate(existingRole, desiredRole) { + log.V(1).Info("Role is up to date, skipping update", + "name", existingRole.Name, + "namespace", existingRole.Namespace) + return nil + } + + // Update needed - 
apply changes + existingRole.Labels = desiredRole.Labels + existingRole.Annotations = desiredRole.Annotations + existingRole.Rules = desiredRole.Rules + return controllerutil.SetControllerReference(jumpstarter, existingRole, r.Scheme) + }) + + if err != nil { + log.Error(err, "Failed to reconcile Role", + "name", desiredRole.Name, + "namespace", desiredRole.Namespace) + return err + } + + log.Info("Role reconciled", + "name", existingRole.Name, + "namespace", existingRole.Namespace, + "operation", op) + + // RoleBinding + desiredRoleBinding := r.createRoleBinding(jumpstarter) + + existingRoleBinding := &rbacv1.RoleBinding{} + existingRoleBinding.Name = desiredRoleBinding.Name + existingRoleBinding.Namespace = desiredRoleBinding.Namespace + + op, err = controllerutil.CreateOrUpdate(ctx, r.Client, existingRoleBinding, func() error { + // Check if this is a new role binding or an existing one + if existingRoleBinding.CreationTimestamp.IsZero() { + // RoleBinding is being created, copy all fields from desired + existingRoleBinding.Labels = desiredRoleBinding.Labels + existingRoleBinding.Annotations = desiredRoleBinding.Annotations + existingRoleBinding.Subjects = desiredRoleBinding.Subjects + existingRoleBinding.RoleRef = desiredRoleBinding.RoleRef + return controllerutil.SetControllerReference(jumpstarter, existingRoleBinding, r.Scheme) + } + + // RoleBinding exists, check if update is needed + if !roleBindingNeedsUpdate(existingRoleBinding, desiredRoleBinding) { + log.V(1).Info("RoleBinding is up to date, skipping update", + "name", existingRoleBinding.Name, + "namespace", existingRoleBinding.Namespace) + return nil + } + + // Update needed - apply changes + existingRoleBinding.Labels = desiredRoleBinding.Labels + existingRoleBinding.Annotations = desiredRoleBinding.Annotations + existingRoleBinding.Subjects = desiredRoleBinding.Subjects + existingRoleBinding.RoleRef = desiredRoleBinding.RoleRef + return controllerutil.SetControllerReference(jumpstarter, 
existingRoleBinding, r.Scheme) + }) + + if err != nil { + log.Error(err, "Failed to reconcile RoleBinding", + "name", desiredRoleBinding.Name, + "namespace", desiredRoleBinding.Namespace) + return err + } + + log.Info("RoleBinding reconciled", + "name", existingRoleBinding.Name, + "namespace", existingRoleBinding.Namespace, + "operation", op) + + return nil +} + +// createServiceAccount creates a service account for the controller +func (r *JumpstarterReconciler) createServiceAccount(jumpstarter *operatorv1alpha1.Jumpstarter) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-controller-manager", jumpstarter.Name), + Namespace: jumpstarter.Namespace, + Labels: map[string]string{ + "app": "jumpstarter-controller", + "app.kubernetes.io/name": "jumpstarter-controller", + "app.kubernetes.io/managed-by": "jumpstarter-operator", + }, + }, + } +} + +// createRole creates a role with necessary permissions for the controller +func (r *JumpstarterReconciler) createRole(jumpstarter *operatorv1alpha1.Jumpstarter) *rbacv1.Role { + return &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-controller-role", jumpstarter.Name), + Namespace: jumpstarter.Namespace, + Labels: map[string]string{ + "app": "jumpstarter-controller", + "app.kubernetes.io/name": "jumpstarter-controller", + "app.kubernetes.io/managed-by": "jumpstarter-operator", + }, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + { + APIGroups: []string{"jumpstarter.dev"}, + Resources: []string{"clients", "exporters", "leases", "exporteraccesspolicies"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + { + APIGroups: []string{"jumpstarter.dev"}, + 
Resources: []string{"clients/status", "exporters/status", "leases/status", "exporteraccesspolicies/status"}, + Verbs: []string{"get", "update", "patch"}, + }, + { + APIGroups: []string{"jumpstarter.dev"}, + Resources: []string{"clients/finalizers", "exporters/finalizers", "leases/finalizers", "exporteraccesspolicies/finalizers"}, + Verbs: []string{"update"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"events"}, + Verbs: []string{"create", "patch"}, + }, + { + APIGroups: []string{"coordination.k8s.io"}, + Resources: []string{"leases"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + }, + } +} + +// createRoleBinding creates a role binding for the controller +func (r *JumpstarterReconciler) createRoleBinding(jumpstarter *operatorv1alpha1.Jumpstarter) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-controller-rolebinding", jumpstarter.Name), + Namespace: jumpstarter.Namespace, + Labels: map[string]string{ + "app": "jumpstarter-controller", + "app.kubernetes.io/name": "jumpstarter-controller", + "app.kubernetes.io/managed-by": "jumpstarter-operator", + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: fmt.Sprintf("%s-controller-role", jumpstarter.Name), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: fmt.Sprintf("%s-controller-manager", jumpstarter.Name), + Namespace: jumpstarter.Namespace, + }, + }, + } +} diff --git a/controller/deploy/operator/internal/controller/jumpstarter/suite_test.go b/controller/deploy/operator/internal/controller/jumpstarter/suite_test.go new file mode 100644 index 000000000..83034fdec --- /dev/null +++ b/controller/deploy/operator/internal/controller/jumpstarter/suite_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jumpstarter + +import ( + "context" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/internal/controller/testutils" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var ( + ctx context.Context + cancel context.CancelFunc + testEnv *envtest.Environment + cfg *rest.Config + k8sClient client.Client +) + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + var err error + err = operatorv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + // Retrieve the first found binary directory to allow running tests from IDEs + if binaryDir := testutils.GetFirstFoundEnvTestBinaryDir(5); binaryDir != "" { + testEnv.BinaryAssetsDirectory = binaryDir + } + + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/controller/deploy/operator/internal/controller/testutils/envtest.go b/controller/deploy/operator/internal/controller/testutils/envtest.go new file mode 100644 index 000000000..eb5f506d1 --- /dev/null +++ b/controller/deploy/operator/internal/controller/testutils/envtest.go @@ -0,0 +1,57 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutils + +import ( + "os" + "path/filepath" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// GetFirstFoundEnvTestBinaryDir locates the first binary in the specified path. +// ENVTEST-based tests depend on specific binaries, usually located in paths set by +// controller-runtime. When running tests directly (e.g., via an IDE) without using +// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. +// +// This function streamlines the process by finding the required binaries, similar to +// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are +// properly set up, run 'make setup-envtest' beforehand. +// +// The depth parameter specifies how many directories up to traverse from the test file +// to reach the operator root (where bin/k8s is located). +func GetFirstFoundEnvTestBinaryDir(depth int) string { + // Build the path based on depth + pathComponents := make([]string, 0, depth+2) + for i := 0; i < depth; i++ { + pathComponents = append(pathComponents, "..") + } + pathComponents = append(pathComponents, "bin", "k8s") + + basePath := filepath.Join(pathComponents...) 
+ entries, err := os.ReadDir(basePath) + if err != nil { + logf.Log.Error(err, "Failed to read directory", "path", basePath) + return "" + } + for _, entry := range entries { + if entry.IsDir() { + return filepath.Join(basePath, entry.Name()) + } + } + return "" +} diff --git a/controller/deploy/operator/internal/log/levels.go b/controller/deploy/operator/internal/log/levels.go new file mode 100644 index 000000000..8b25f2033 --- /dev/null +++ b/controller/deploy/operator/internal/log/levels.go @@ -0,0 +1,18 @@ +package log + +// Log levels for use with controller-runtime's logr.Logger.V() method. +// Higher numbers mean more verbose logging. +const ( + // LevelInfo is the standard info level (V(0)) + LevelInfo = 0 + + // LevelDebug is for debug-level logging (V(1)) + // Use for detailed operational information that is useful for debugging + // but not needed during normal operation. + LevelDebug = 1 + + // LevelTrace is for trace-level logging (V(2)) + // Use for very detailed information about internal operations, + // useful for troubleshooting complex issues. + LevelTrace = 2 +) diff --git a/controller/deploy/operator/internal/utils/utils.go b/controller/deploy/operator/internal/utils/utils.go new file mode 100644 index 000000000..9796da5d5 --- /dev/null +++ b/controller/deploy/operator/internal/utils/utils.go @@ -0,0 +1,19 @@ +package utils + +// MergeMaps merges two string maps, with values from the second map taking precedence. +// This is useful for merging labels, annotations, or any other string key-value pairs. 
+func MergeMaps(base, overrides map[string]string) map[string]string { + merged := make(map[string]string) + + // Add base map first + for k, v := range base { + merged[k] = v + } + + // Override with values from second map + for k, v := range overrides { + merged[k] = v + } + + return merged +} diff --git a/controller/deploy/operator/test/e2e/e2e_suite_test.go b/controller/deploy/operator/test/e2e/e2e_suite_test.go new file mode 100644 index 000000000..43fe08289 --- /dev/null +++ b/controller/deploy/operator/test/e2e/e2e_suite_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" +) + +var ( + cfg *rest.Config + k8sClient client.Client + clientset *kubernetes.Clientset + ctx context.Context +) + +// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated, +// temporary environment to validate project changes with the purposed to be used in CI jobs. +// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs +// CertManager. 
+func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + _, _ = fmt.Fprintf(GinkgoWriter, "Starting jumpstarter-operator integration test suite\n") + RunSpecs(t, "e2e suite") +} + +var _ = BeforeSuite(func() { + ctx = context.Background() + + By("bootstrapping test environment") + + var err error + // Use the in-cluster config or kubeconfig + cfg = ctrl.GetConfigOrDie() + + // Create the Kubernetes clientset for direct API calls + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + + // Add the operator scheme to the default scheme + err = operatorv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // Create a controller-runtime client + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + +}) diff --git a/controller/deploy/operator/test/e2e/e2e_test.go b/controller/deploy/operator/test/e2e/e2e_test.go new file mode 100644 index 000000000..f304e7d09 --- /dev/null +++ b/controller/deploy/operator/test/e2e/e2e_test.go @@ -0,0 +1,804 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/yaml" + "sigs.k8s.io/controller-runtime/pkg/client" + + operatorv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/deploy/operator/api/v1alpha1" +) + +// namespace where the project is deployed in +const namespace = "jumpstarter-operator-system" + +// serviceAccountName created for the project +const serviceAccountName = "jumpstarter-operator-controller-manager" + +// metricsServiceName is the name of the metrics service of the project +const metricsServiceName = "jumpstarter-operator-controller-manager-metrics-service" + +// metricsRoleBindingName is the name of the RBAC that will be created to allow get the metrics data +const metricsRoleBindingName = "jumpstarter-operator-metrics-binding" + +// testNamespace is the namespace where the test will be run +const testNamespace = "jumpstarter-lab-e2e" + +var _ = Describe("Manager", Ordered, func() { + var controllerPodName string + + // After all tests have been executed, clean up by undeploying the controller, uninstalling CRDs, + // and deleting the namespace. 
+ AfterAll(func() { + By("cleaning up the curl pod for metrics") + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "curl-metrics", + Namespace: namespace, + }, + } + _ = k8sClient.Delete(ctx, pod) + + By("waiting for curl pod to be deleted") + Eventually(func(g Gomega) { + getErr := k8sClient.Get(ctx, types.NamespacedName{ + Name: "curl-metrics", + Namespace: namespace, + }, pod) + g.Expect(apierrors.IsNotFound(getErr)).To(BeTrue()) + }, 30*time.Second).Should(Succeed()) + + By("deleting the jumpstarter-lab-e2e namespace") + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + _ = k8sClient.Delete(ctx, ns) + + By("waiting for namespace to be fully deleted") + Eventually(func(g Gomega) { + getErr := k8sClient.Get(ctx, types.NamespacedName{ + Name: testNamespace, + }, ns) + g.Expect(apierrors.IsNotFound(getErr)).To(BeTrue()) + }, 2*time.Minute).Should(Succeed()) + }) + + // After each test, check for failures and collect logs, events, + // and pod descriptions for debugging. 
+ AfterEach(func() { + specReport := CurrentSpecReport() + if specReport.Failed() { + By("Fetching controller manager pod logs") + req := clientset.CoreV1().Pods(namespace).GetLogs(controllerPodName, &corev1.PodLogOptions{}) + podLogs, err := req.Stream(ctx) + if err == nil { + defer podLogs.Close() + buf := new(bytes.Buffer) + _, _ = io.Copy(buf, podLogs) + _, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", buf.String()) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err) + } + + By("Fetching Kubernetes events") + eventList := &corev1.EventList{} + err = k8sClient.List(ctx, eventList, client.InNamespace(namespace)) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n") + for _, event := range eventList.Items { + _, _ = fmt.Fprintf(GinkgoWriter, "%s %s %s %s\n", + event.LastTimestamp.Format(time.RFC3339), + event.InvolvedObject.Name, + event.Reason, + event.Message) + } + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err) + } + + By("Fetching curl-metrics logs") + req = clientset.CoreV1().Pods(namespace).GetLogs("curl-metrics", &corev1.PodLogOptions{}) + metricsLogs, err := req.Stream(ctx) + if err == nil { + defer metricsLogs.Close() + buf := new(bytes.Buffer) + _, _ = io.Copy(buf, metricsLogs) + _, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", buf.String()) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err) + } + + By("Fetching controller manager pod description") + pod := &corev1.Pod{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: controllerPodName, + Namespace: namespace, + }, pod) + if err == nil { + fmt.Printf("Pod description:\nName: %s\nPhase: %s\nConditions: %+v\n", + pod.Name, pod.Status.Phase, pod.Status.Conditions) + } else { + fmt.Println("Failed to describe controller pod") + } + } + }) + + SetDefaultEventuallyTimeout(2 * time.Minute) + SetDefaultEventuallyPollingInterval(time.Second) + + 
Context("Manager", func() { + It("should run successfully", func() { + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func(g Gomega) { + // Get the name of the controller-manager pod + podList := &corev1.PodList{} + err := k8sClient.List(ctx, podList, + client.InNamespace(namespace), + client.MatchingLabels{"control-plane": "controller-manager"}) + g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information") + + // Filter out pods that are being deleted + var runningPods []corev1.Pod + for _, pod := range podList.Items { + if pod.DeletionTimestamp.IsZero() { + runningPods = append(runningPods, pod) + } + } + + g.Expect(runningPods).To(HaveLen(1), "expected 1 controller pod running") + controllerPodName = runningPods[0].Name + g.Expect(controllerPodName).To(ContainSubstring("controller-manager")) + + // Validate the pod's status + g.Expect(runningPods[0].Status.Phase).To(Equal(corev1.PodRunning), "Incorrect controller-manager pod status") + } + Eventually(verifyControllerUp).Should(Succeed()) + }) + + It("should ensure the metrics endpoint is serving metrics", func() { + By("creating a ClusterRoleBinding for the service account to allow access to metrics") + // Delete the ClusterRoleBinding if it exists (ignore errors) + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: metricsRoleBindingName, + }, + } + err := k8sClient.Delete(ctx, crb) + if err == nil { + By("waiting for existing ClusterRoleBinding to be deleted") + Eventually(func(g Gomega) { + getErr := k8sClient.Get(ctx, types.NamespacedName{ + Name: metricsRoleBindingName, + }, crb) + g.Expect(apierrors.IsNotFound(getErr)).To(BeTrue()) + }, 30*time.Second).Should(Succeed()) + } + + // Create the ClusterRoleBinding + crb = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: metricsRoleBindingName, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: 
"ClusterRole", + Name: "jumpstarter-operator-metrics-reader", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccountName, + Namespace: namespace, + }, + }, + } + err = k8sClient.Create(ctx, crb) + Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding") + + By("validating that the metrics service is available") + svc := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: metricsServiceName, + Namespace: namespace, + }, svc) + Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") + + By("getting the service account token") + token, err := serviceAccountToken() + Expect(err).NotTo(HaveOccurred()) + Expect(token).NotTo(BeEmpty()) + + By("waiting for the metrics endpoint to be ready") + verifyMetricsEndpointReady := func(g Gomega) { + endpoints := &corev1.Endpoints{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: metricsServiceName, + Namespace: namespace, + }, endpoints) + g.Expect(err).NotTo(HaveOccurred()) + + hasPort := false + for _, subset := range endpoints.Subsets { + for _, port := range subset.Ports { + if port.Port == 8443 { + hasPort = true + break + } + } + } + g.Expect(hasPort).To(BeTrue(), "Metrics endpoint is not ready") + } + Eventually(verifyMetricsEndpointReady).Should(Succeed()) + + By("verifying that the controller manager is serving the metrics server") + verifyMetricsServerStarted := func(g Gomega) { + req := clientset.CoreV1().Pods(namespace).GetLogs(controllerPodName, &corev1.PodLogOptions{}) + podLogs, err := req.Stream(ctx) + g.Expect(err).NotTo(HaveOccurred()) + defer podLogs.Close() + buf := new(bytes.Buffer) + _, _ = io.Copy(buf, podLogs) + g.Expect(buf.String()).To(ContainSubstring("controller-runtime.metrics\tServing metrics server"), + "Metrics server not yet started") + } + Eventually(verifyMetricsServerStarted).Should(Succeed()) + + By("creating the curl-metrics pod to access the metrics endpoint") + curlPod := &corev1.Pod{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "curl-metrics", + Namespace: namespace, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + ServiceAccountName: serviceAccountName, + Containers: []corev1.Container{ + { + Name: "curl", + Image: "curlimages/curl:8.10.1", + Command: []string{"/bin/sh", "-c"}, + Args: []string{ + fmt.Sprintf("curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics", + token, metricsServiceName, namespace), + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: func() *bool { b := false; return &b }(), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + RunAsNonRoot: func() *bool { b := true; return &b }(), + RunAsUser: func() *int64 { i := int64(1000); return &i }(), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + }, + }, + }, + } + err = k8sClient.Create(ctx, curlPod) + Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod") + + By("waiting for the curl-metrics pod to complete.") + verifyCurlUp := func(g Gomega) { + pod := &corev1.Pod{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "curl-metrics", + Namespace: namespace, + }, pod) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(pod.Status.Phase).To(Equal(corev1.PodSucceeded), "curl pod in wrong status") + } + Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) + + By("getting the metrics by checking curl-metrics logs") + metricsOutput := getMetricsOutput() + Expect(metricsOutput).To(ContainSubstring( + "controller_runtime_reconcile_total", + )) + }) + + // +kubebuilder:scaffold:e2e-webhooks-checks + + // TODO: Customize the e2e test suite with scenarios specific to your project. 
+ // Consider applying sample/CR(s) and check their status and/or verifying + // the reconciliation by using the metrics, i.e.: + // metricsOutput := getMetricsOutput() + // Expect(metricsOutput).To(ContainSubstring( + // fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`, + // strings.ToLower(), + // )) + }) + + Context("Jumpstarter operator", Ordered, func() { + const baseDomain = "jumpstarter.127.0.0.1.nip.io" + var dynamicTestNamespace string + + BeforeAll(func() { + dynamicTestNamespace = CreateTestNamespace() + }) + + It("should deploy jumpstarter successfully", func() { + By("creating a Jumpstarter custom resource") + // Get image from environment or use default + image := os.Getenv("IMG") + if image == "" { + image = "quay.io/jumpstarter-dev/jumpstarter-controller:latest" + } + + jumpstarterYAML := fmt.Sprintf(`apiVersion: operator.jumpstarter.dev/v1alpha1 +kind: Jumpstarter +metadata: + name: jumpstarter + namespace: %s +spec: + baseDomain: %s + useCertManager: false + controller: + image: %s + imagePullPolicy: IfNotPresent + replicas: 1 + grpc: + endpoints: + - address: grpc.%s:8082 + nodeport: + enabled: true + port: 30010 + authentication: + internal: + prefix: "internal:" + enabled: true + routers: + image: %s + imagePullPolicy: IfNotPresent + replicas: 1 + resources: + requests: + cpu: 100m + memory: 100Mi + grpc: + endpoints: + - address: router.%s:8083 + nodeport: + enabled: true + port: 30011 +`, dynamicTestNamespace, baseDomain, image, baseDomain, image, baseDomain) + + err := applyYAML(jumpstarterYAML) + Expect(err).NotTo(HaveOccurred(), "Failed to create Jumpstarter CR") + + By("verifying the Jumpstarter CR was created") + verifyJumpstarterCR := func(g Gomega) { + js := &operatorv1alpha1.Jumpstarter{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "jumpstarter", + Namespace: dynamicTestNamespace, + }, js) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(js.Name).To(Equal("jumpstarter")) + } + 
Eventually(verifyJumpstarterCR).Should(Succeed()) + + By("verifying the controller deployment was created") + verifyControllerDeployment := func(g Gomega) { + deploymentList := &corev1.PodList{} + err := k8sClient.List(ctx, deploymentList, + client.InNamespace(dynamicTestNamespace), + client.MatchingLabels{"app": "jumpstarter-controller"}) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(deploymentList.Items).NotTo(BeEmpty()) + } + Eventually(verifyControllerDeployment, 2*time.Minute).Should(Succeed()) + + By("verifying the router deployment was created") + verifyRouterDeployment := func(g Gomega) { + deploymentList := &corev1.PodList{} + err := k8sClient.List(ctx, deploymentList, + client.InNamespace(dynamicTestNamespace), + client.MatchingLabels{"app": "jumpstarter-router-0"}) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(deploymentList.Items).NotTo(BeEmpty()) + } + Eventually(verifyRouterDeployment, 2*time.Minute).Should(Succeed()) + + By("verifying the controller configmap exists and contains the expected contents") + verifyConfigMap := func(g Gomega) { + cm := &corev1.ConfigMap{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "jumpstarter-controller", + Namespace: dynamicTestNamespace, + }, cm) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cm.Data).To(HaveKey("config")) + g.Expect(cm.Data).To(HaveKey("router")) + + expectedConfigYAML := `authentication: + internal: + prefix: 'internal:' + tokenLifetime: 43800h0m0s + jwt: [] + k8s: {} +grpc: + keepalive: + minTime: 1s + permitWithoutStream: true +provisioning: + enabled: false +` + expectedRouterYAML := `default: + endpoint: router.jumpstarter.127.0.0.1.nip.io:8083 +` + + // Compare config (YAML) + actualConfig := cm.Data["config"] + actualRouter := cm.Data["router"] + + // Unmarshal and compare as map[string]interface{} for robustness to field ordering + var actualConfigObj, expectedConfigObj map[string]interface{} + err = yaml.Unmarshal([]byte(actualConfig), &actualConfigObj) + 
g.Expect(err).NotTo(HaveOccurred()) + + err = yaml.Unmarshal([]byte(expectedConfigYAML), &expectedConfigObj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(actualConfigObj).To(Equal(expectedConfigObj), "config map 'config' entry did not match expected") + + var actualRouterObj, expectedRouterObj map[string]interface{} + err = yaml.Unmarshal([]byte(actualRouter), &actualRouterObj) + g.Expect(err).NotTo(HaveOccurred()) + + err = yaml.Unmarshal([]byte(expectedRouterYAML), &expectedRouterObj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(actualRouterObj).To(Equal(expectedRouterObj), "config map 'router' entry did not match expected") + } + Eventually(verifyConfigMap, 1*time.Minute).Should(Succeed()) + }) + + It("should allow access to grpc endpoints", func() { + By("checking endpoint grpc access to controller") + waitForGRPCEndpoint("grpc.jumpstarter.127.0.0.1.nip.io:8082", 1*time.Minute) + By("checking endpoint grpc access to router") + waitForGRPCEndpoint("router.jumpstarter.127.0.0.1.nip.io:8083", 1*time.Minute) + }) + + It("should create new routers if the number of replicas is increased", func() { + By("updating the Jumpstarter custom resource to increase the number of replicas") + // Update the jumpstarter object using the k8s client + jumpstarter := &operatorv1alpha1.Jumpstarter{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "jumpstarter", + Namespace: dynamicTestNamespace, + }, jumpstarter) + Expect(err).NotTo(HaveOccurred()) + + jumpstarter.Spec.Routers.Replicas = 3 + err = k8sClient.Update(ctx, jumpstarter) + Expect(err).NotTo(HaveOccurred()) + + By("verifying the new routers deployments were created") + allRoutersDeploymentsCreated := func(g Gomega) bool { + deployment := &appsv1.Deployment{} + + for i := 0; i < int(jumpstarter.Spec.Routers.Replicas); i++ { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: fmt.Sprintf("jumpstarter-router-%d", i), + Namespace: dynamicTestNamespace, + }, deployment) + // + if err != nil { + return 
false + } + Expect(*deployment.Spec.Replicas).To(Equal(int32(1))) + } + return true + } + Eventually(allRoutersDeploymentsCreated, 1*time.Minute).Should(BeTrue()) + By("verifying the new router services were created") + allRoutersServicesCreated := func(g Gomega) bool { + service := &corev1.Service{} + for i := 0; i < int(jumpstarter.Spec.Routers.Replicas); i++ { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: fmt.Sprintf("jumpstarter-router-%d-np", i), + Namespace: dynamicTestNamespace, + }, service) + if err != nil { + return false + } + // the selector should point to the specific router + Expect(service.Spec.Selector).To(HaveKeyWithValue("app", fmt.Sprintf("jumpstarter-router-%d", i))) + // the service should have exactly one port that points to the router port + Expect(service.Spec.Ports).To(HaveLen(1)) + Expect(service.Spec.Ports[0].Port).To(Equal(int32(8083))) + Expect(service.Spec.Ports[0].TargetPort).To(Equal(intstr.FromInt(8083))) + // and has the desired protocol and app protocol + Expect(service.Spec.Ports[0].Protocol).To(Equal(corev1.ProtocolTCP)) + Expect(*service.Spec.Ports[0].AppProtocol).To(Equal("h2c")) + } + return true + } + Eventually(allRoutersServicesCreated, 1*time.Minute).Should(BeTrue()) + }) + + It("should scale down the routers if the number of replicas is decreased", func() { + By("updating the Jumpstarter custom resource to decrease the number of replicas") + jumpstarter := &operatorv1alpha1.Jumpstarter{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "jumpstarter", + Namespace: dynamicTestNamespace, + }, jumpstarter) + Expect(err).NotTo(HaveOccurred()) + + jumpstarter.Spec.Routers.Replicas = 1 + err = k8sClient.Update(ctx, jumpstarter) + Expect(err).NotTo(HaveOccurred()) + + By("verifying the router deployments were scaled down") + routerDeploymentsCount := func(g Gomega) int { + deploymentList := &appsv1.DeploymentList{} + err := k8sClient.List(ctx, deploymentList, + client.InNamespace(dynamicTestNamespace), + 
client.MatchingLabels{"component": "router"}) + Expect(err).NotTo(HaveOccurred()) + return len(deploymentList.Items) + } + Eventually(routerDeploymentsCount, 1*time.Minute).Should(Equal(1)) + + By("verifying the router services were scaled down") + routerServicesCount := func(g Gomega) int { + serviceList := &corev1.ServiceList{} + err := k8sClient.List(ctx, serviceList, + client.InNamespace(dynamicTestNamespace), + client.MatchingLabels{"component": "router"}) + Expect(err).NotTo(HaveOccurred()) + return len(serviceList.Items) + } + Eventually(routerServicesCount, 1*time.Minute).Should(Equal(1)) + }) + + It("should setup ingress for the controller and router for ingress mode", func() { + By("updating the Jumpstarter custom resource to enable ingress mode") + jumpstarter := &operatorv1alpha1.Jumpstarter{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "jumpstarter", + Namespace: dynamicTestNamespace, + }, jumpstarter) + Expect(err).NotTo(HaveOccurred()) + + jumpstarter.Spec.Controller.GRPC.Endpoints = []operatorv1alpha1.Endpoint{ + { + Address: "grpc.jumpstarter.127.0.0.1.nip.io:5443", + Ingress: &operatorv1alpha1.IngressConfig{ + Enabled: true, + Class: "nginx", + }, + }, + } + jumpstarter.Spec.Routers.GRPC.Endpoints = []operatorv1alpha1.Endpoint{ + { + Address: "router.jumpstarter.127.0.0.1.nip.io:5443", + Ingress: &operatorv1alpha1.IngressConfig{ + Enabled: true, + Class: "nginx", + }, + }, + } + err = k8sClient.Update(ctx, jumpstarter) + Expect(err).NotTo(HaveOccurred()) + + By("verifying the ingress for the controller was created") + verifyIngressForController := func(g Gomega) bool { + ingress := &networkingv1.Ingress{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "controller-grpc-ing", + Namespace: dynamicTestNamespace, + }, ingress) + if err != nil { + return false + } + Expect(ingress.Spec.Rules).To(HaveLen(1)) + Expect(ingress.Spec.Rules[0].Host).To(Equal("grpc.jumpstarter.127.0.0.1.nip.io")) + 
Expect(ingress.Spec.Rules[0].HTTP.Paths).To(HaveLen(1)) + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/")) + Expect(*ingress.Spec.Rules[0].HTTP.Paths[0].PathType).To(Equal(networkingv1.PathTypePrefix)) + return true + } + Eventually(verifyIngressForController, 1*time.Minute).Should(BeTrue()) + + By("verifying the ingress for the router was created") + verifyIngressForRouter := func(g Gomega) bool { + ingress := &networkingv1.Ingress{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "jumpstarter-router-0-ing", + Namespace: dynamicTestNamespace, + }, ingress) + if err != nil { + return false + } + Expect(ingress.Spec.Rules).To(HaveLen(1)) + Expect(ingress.Spec.Rules[0].Host).To(Equal("router.jumpstarter.127.0.0.1.nip.io")) + Expect(ingress.Spec.Rules[0].HTTP.Paths).To(HaveLen(1)) + Expect(ingress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/")) + Expect(*ingress.Spec.Rules[0].HTTP.Paths[0].PathType).To(Equal(networkingv1.PathTypePrefix)) + return true + } + Eventually(verifyIngressForRouter, 1*time.Minute).Should(BeTrue()) + }) + + It("should contain the right router configuration in the configmap", func() { + By("checking the configmap contains the right router configuration") + Eventually(func(g Gomega) string { + configmap := &corev1.ConfigMap{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "jumpstarter-controller", + Namespace: dynamicTestNamespace, + }, configmap) + g.Expect(err).NotTo(HaveOccurred()) + return configmap.Data["router"] + }, 1*time.Minute).Should(ContainSubstring("router.jumpstarter.127.0.0.1.nip.io:5443")) + }) + + It("should allow access to ingress grpc endpoints", func() { + // TODO: fix ingress in kind (not working for helm either) + Skip("nginx ingress not working in kind") + + By("checking endpoint grpc access to controller") + waitForGRPCEndpoint("grpc.jumpstarter.127.0.0.1.nip.io:5443", 1*time.Minute) + By("checking endpoint grpc access to router") + 
waitForGRPCEndpoint("router.jumpstarter.127.0.0.1.nip.io:5443", 1*time.Minute) + }) + + AfterAll(func() { + DeleteTestNamespace(dynamicTestNamespace) + }) + }) +}) + +// serviceAccountToken returns a token for the specified service account in the given namespace. +// It uses the Kubernetes TokenRequest API to generate a token by directly calling the API. +func serviceAccountToken() (string, error) { + var token string + verifyTokenCreation := func(g Gomega) { + // Create a token request for the service account + tokenRequest := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + ExpirationSeconds: func() *int64 { i := int64(3600); return &i }(), + }, + } + + // Use the clientset to create the token + result, err := clientset.CoreV1().ServiceAccounts(namespace).CreateToken( + ctx, + serviceAccountName, + tokenRequest, + metav1.CreateOptions{}, + ) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(result.Status.Token).NotTo(BeEmpty()) + + token = result.Status.Token + } + Eventually(verifyTokenCreation).Should(Succeed()) + + return token, nil +} + +// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint. +func getMetricsOutput() string { + By("getting the curl-metrics logs") + req := clientset.CoreV1().Pods(namespace).GetLogs("curl-metrics", &corev1.PodLogOptions{}) + podLogs, err := req.Stream(ctx) + Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") + defer podLogs.Close() + + buf := new(bytes.Buffer) + _, _ = io.Copy(buf, podLogs) + metricsOutput := buf.String() + + Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK")) + return metricsOutput +} + +// applyYAML applies a YAML string to the Kubernetes cluster using the client. +// This function parses the YAML and creates/updates the resource using server-side apply. +// It supports any Kubernetes resource type. 
+func applyYAML(yamlContent string) error { + // Decode YAML to unstructured object + decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader([]byte(yamlContent)), 4096) + obj := &unstructured.Unstructured{} + + err := decoder.Decode(obj) + if err != nil { + return fmt.Errorf("failed to decode YAML: %w", err) + } + + // Try to create the object first + err = k8sClient.Create(ctx, obj) + if err != nil { + // If it already exists, update it + if apierrors.IsAlreadyExists(err) { + // Get the existing object to get the resource version + existing := &unstructured.Unstructured{} + existing.SetGroupVersionKind(obj.GroupVersionKind()) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + }, existing) + if err != nil { + return fmt.Errorf("failed to get existing resource: %w", err) + } + + // Set the resource version for update + obj.SetResourceVersion(existing.GetResourceVersion()) + err = k8sClient.Update(ctx, obj) + if err != nil { + return fmt.Errorf("failed to update resource: %w", err) + } + } else { + return fmt.Errorf("failed to create resource: %w", err) + } + } + + return nil +} + +// waitForGRPCEndpoint waits for a gRPC endpoint to be ready by attempting to list services using grpcurl. +// It uses Eventually from Gomega to poll the endpoint until it responds or times out. 
+// Args: +// - endpoint: the gRPC endpoint address (e.g., "grpc.jumpstarter.127.0.0.1.nip.io:8082") +// - timeout: maximum time to wait for the endpoint to be ready (default is used from Eventually if not specified) +func waitForGRPCEndpoint(endpoint string, timeout time.Duration) { + By(fmt.Sprintf("waiting for gRPC endpoint %s to be ready", endpoint)) + + // Get grpcurl path from environment or use default + grpcurlPath := os.Getenv("GRPCURL") + if grpcurlPath == "" { + grpcurlPath = "../../../../bin/grpcurl" // installed on the base jumpstarter-controller project + } + + // exec grpcurl -h to verify it is available + cmd := exec.Command(grpcurlPath, "-h") + err := cmd.Run() + Expect(err).NotTo(HaveOccurred(), "grpcurl is not available") + + checkEndpoint := func(g Gomega) { + cmd := exec.Command(grpcurlPath, "-insecure", endpoint, "list") + err := cmd.Run() + g.Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("gRPC endpoint %s is not ready", endpoint)) + } + + Eventually(checkEndpoint, timeout, 2*time.Second).Should(Succeed()) +} diff --git a/controller/deploy/operator/test/e2e/utils.go b/controller/deploy/operator/test/e2e/utils.go new file mode 100644 index 000000000..d68ca2f2f --- /dev/null +++ b/controller/deploy/operator/test/e2e/utils.go @@ -0,0 +1,73 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os" + "time" + + "github.com/google/uuid" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// CreateTestNamespace creates a unique test namespace with a UUID suffix. +// It returns the generated namespace name. +// The namespace name will be in the format: jumpstarter-e2e-{uuid} +func CreateTestNamespace() string { + namespaceName := fmt.Sprintf("jumpstarter-e2e-%s", uuid.New().String()) + + By(fmt.Sprintf("creating the test namespace %s", namespaceName)) + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespaceName, + }, + } + err := k8sClient.Create(ctx, ns) + Expect(err).NotTo(HaveOccurred(), "Failed to create test namespace") + + return namespaceName +} + +// DeleteTestNamespace deletes the specified namespace and waits for it to be fully removed. +// It uses a 2-minute timeout to ensure the namespace is completely deleted. +func DeleteTestNamespace(namespaceName string) { + By("deleting the test namespace") + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespaceName, + }, + } + _ = k8sClient.Delete(ctx, ns) + + // if environment variable E2E_NO_CLEANUP_WAIT is set, skip the wait + if os.Getenv("E2E_NO_CLEANUP_WAIT") == "true" { + return + } + By(fmt.Sprintf("waiting for namespace %s to be fully deleted", namespaceName)) + Eventually(func(g Gomega) { + getErr := k8sClient.Get(ctx, types.NamespacedName{ + Name: namespaceName, + }, ns) + g.Expect(apierrors.IsNotFound(getErr)).To(BeTrue()) + }, 2*time.Minute).Should(Succeed()) +} diff --git a/controller/deploy/operator/test/utils/utils.go b/controller/deploy/operator/test/utils/utils.go new file mode 100644 index 000000000..841683609 --- /dev/null +++ b/controller/deploy/operator/test/utils/utils.go @@ -0,0 +1,254 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "strings" + + . "github.com/onsi/ginkgo/v2" // nolint:revive,staticcheck +) + +const ( + prometheusOperatorVersion = "v0.77.1" + prometheusOperatorURL = "https://github.com/prometheus-operator/prometheus-operator/" + + "releases/download/%s/bundle.yaml" + + certmanagerVersion = "v1.16.3" + certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml" +) + +func warnError(err error) { + _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) (string, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", err) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command) + output, err := cmd.CombinedOutput() + if err != nil { + return string(output), fmt.Errorf("%q failed with error %q: %w", command, string(output), err) + } + + return string(output), nil +} + +// InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics. 
+func InstallPrometheusOperator() error { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "create", "-f", url) + _, err := Run(cmd) + return err +} + +// UninstallPrometheusOperator uninstalls the prometheus +func UninstallPrometheusOperator() { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// IsPrometheusCRDsInstalled checks if any Prometheus CRDs are installed +// by verifying the existence of key CRDs related to Prometheus. +func IsPrometheusCRDsInstalled() bool { + // List of common Prometheus CRDs + prometheusCRDs := []string{ + "prometheuses.monitoring.coreos.com", + "prometheusrules.monitoring.coreos.com", + "prometheusagents.monitoring.coreos.com", + } + + cmd := exec.Command("kubectl", "get", "crds", "-o", "custom-columns=NAME:.metadata.name") + output, err := Run(cmd) + if err != nil { + return false + } + crdList := GetNonEmptyLines(output) + for _, crd := range prometheusCRDs { + for _, line := range crdList { + if strings.Contains(line, crd) { + return true + } + } + } + + return false +} + +// UninstallCertManager uninstalls the cert manager +func UninstallCertManager() { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// InstallCertManager installs the cert manager bundle. +func InstallCertManager() error { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "apply", "-f", url) + if _, err := Run(cmd); err != nil { + return err + } + // Wait for cert-manager-webhook to be ready, which can take time if cert-manager + // was re-installed after uninstalling on a cluster. 
+ cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", + "--for", "condition=Available", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + + _, err := Run(cmd) + return err +} + +// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed +// by verifying the existence of key CRDs related to Cert Manager. +func IsCertManagerCRDsInstalled() bool { + // List of common Cert Manager CRDs + certManagerCRDs := []string{ + "certificates.cert-manager.io", + "issuers.cert-manager.io", + "clusterissuers.cert-manager.io", + "certificaterequests.cert-manager.io", + "orders.acme.cert-manager.io", + "challenges.acme.cert-manager.io", + } + + // Execute the kubectl command to get all CRDs + cmd := exec.Command("kubectl", "get", "crds") + output, err := Run(cmd) + if err != nil { + return false + } + + // Check if any of the Cert Manager CRDs are present + crdList := GetNonEmptyLines(output) + for _, crd := range certManagerCRDs { + for _, line := range crdList { + if strings.Contains(line, crd) { + return true + } + } + } + + return false +} + +// LoadImageToKindClusterWithName loads a local docker image to the kind cluster +func LoadImageToKindClusterWithName(name string) error { + cluster := "kind" + if v, ok := os.LookupEnv("KIND_CLUSTER"); ok { + cluster = v + } + kindOptions := []string{"load", "docker-image", name, "--name", cluster} + cmd := exec.Command("kind", kindOptions...) + _, err := Run(cmd) + return err +} + +// GetNonEmptyLines converts given command output string into individual objects +// according to line breakers, and ignores the empty elements in it. 
+func GetNonEmptyLines(output string) []string { + var res []string + elements := strings.Split(output, "\n") + for _, element := range elements { + if element != "" { + res = append(res, element) + } + } + + return res +} + +// GetProjectDir will return the directory where the project is +func GetProjectDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return wd, fmt.Errorf("failed to get current working directory: %w", err) + } + wd = strings.ReplaceAll(wd, "/test/e2e", "") + return wd, nil +} + +// UncommentCode searches for target in the file and remove the comment prefix +// of the target content. The target content may span multiple lines. +func UncommentCode(filename, target, prefix string) error { + // false positive + // nolint:gosec + content, err := os.ReadFile(filename) + if err != nil { + return fmt.Errorf("failed to read file %q: %w", filename, err) + } + strContent := string(content) + + idx := strings.Index(strContent, target) + if idx < 0 { + return fmt.Errorf("unable to find the code %q to be uncomment", target) + } + + out := new(bytes.Buffer) + _, err = out.Write(content[:idx]) + if err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + + scanner := bufio.NewScanner(bytes.NewBufferString(target)) + if !scanner.Scan() { + return nil + } + for { + if _, err = out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)); err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + // Avoid writing a newline in case the previous line was the last in target. 
+ if !scanner.Scan() { + break + } + if _, err = out.WriteString("\n"); err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + } + + if _, err = out.Write(content[idx+len(target):]); err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + + // false positive + // nolint:gosec + if err = os.WriteFile(filename, out.Bytes(), 0644); err != nil { + return fmt.Errorf("failed to write file %q: %w", filename, err) + } + + return nil +} diff --git a/controller/dist/install.yaml b/controller/dist/install.yaml new file mode 100644 index 000000000..cbfbcc87a --- /dev/null +++ b/controller/dist/install.yaml @@ -0,0 +1,582 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + control-plane: controller-manager + name: jumpstarter-router-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: exporters.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Exporter + listKind: ExporterList + plural: exporters + singular: exporter + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Exporter is the Schema for the exporters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExporterSpec defines the desired state of Exporter + properties: + credentials: + items: + description: |- + SecretReference represents a Secret Reference. It has enough information to retrieve secret + in any namespace + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + type: array + type: object + status: + description: ExporterStatus defines the observed state of Exporter + properties: + conditions: + description: |- + INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + Important: Run "make" to regenerate code after modifying this file + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + devices: + items: + properties: + driver_interface: + type: string + labels: + additionalProperties: + type: string + type: object + uuid: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: identities.jumpstarter.dev +spec: + group: jumpstarter.dev + names: + kind: Identity + listKind: IdentityList + plural: identities + singular: identity + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Identity is the Schema for the identities API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IdentitySpec defines the desired state of Identity + properties: + credentials: + items: + description: |- + SecretReference represents a Secret Reference. 
It has enough information to retrieve secret + in any namespace + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + type: array + type: object + status: + description: IdentityStatus defines the observed state of Identity + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + name: jumpstarter-router-controller-manager + namespace: jumpstarter-router-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + name: jumpstarter-router-tokenholder + namespace: jumpstarter-router-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + name: jumpstarter-router-leader-election-role + namespace: jumpstarter-router-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + name: jumpstarter-router-exporter-editor-role +rules: +- apiGroups: + - jumpstarter.dev + resources: + - exporters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- 
apiGroups: + - jumpstarter.dev + resources: + - exporters/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + name: jumpstarter-router-exporter-viewer-role +rules: +- apiGroups: + - jumpstarter.dev + resources: + - exporters + verbs: + - get + - list + - watch +- apiGroups: + - jumpstarter.dev + resources: + - exporters/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + name: jumpstarter-router-identity-editor-role +rules: +- apiGroups: + - jumpstarter.dev + resources: + - identities + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - identities/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + name: jumpstarter-router-identity-viewer-role +rules: +- apiGroups: + - jumpstarter.dev + resources: + - identities + verbs: + - get + - list + - watch +- apiGroups: + - jumpstarter.dev + resources: + - identities/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: jumpstarter-router-manager-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create +- apiGroups: + - jumpstarter.dev + resources: + - exporters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - 
jumpstarter.dev + resources: + - exporters/finalizers + verbs: + - update +- apiGroups: + - jumpstarter.dev + resources: + - exporters/status + verbs: + - get + - patch + - update +- apiGroups: + - jumpstarter.dev + resources: + - identities + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - jumpstarter.dev + resources: + - identities/finalizers + verbs: + - update +- apiGroups: + - jumpstarter.dev + resources: + - identities/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + name: jumpstarter-router-leader-election-rolebinding + namespace: jumpstarter-router-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: jumpstarter-router-leader-election-role +subjects: +- kind: ServiceAccount + name: jumpstarter-router-controller-manager + namespace: jumpstarter-router-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + name: jumpstarter-router-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: jumpstarter-router-manager-role +subjects: +- kind: ServiceAccount + name: jumpstarter-router-controller-manager + namespace: jumpstarter-router-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: jumpstarter-router + control-plane: controller-manager + name: jumpstarter-router-controller-manager-metrics-service + namespace: jumpstarter-router-system +spec: + ports: + - name: grpc + port: 8082 + protocol: TCP + targetPort: 8082 + selector: + control-plane: controller-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + 
app.kubernetes.io/name: jumpstarter-router + control-plane: controller-manager + name: jumpstarter-router-controller-manager + namespace: jumpstarter-router-system +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + containers: + - args: + - --leader-elect + - --health-probe-bind-address=:8081 + command: + - /manager + image: quay.io/jumpstarter-dev/jumpstarter-controller:main + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: jumpstarter-router-controller-manager + terminationGracePeriodSeconds: 10 diff --git a/controller/go.mod b/controller/go.mod new file mode 100644 index 000000000..2b194d9c7 --- /dev/null +++ b/controller/go.mod @@ -0,0 +1,126 @@ +module github.com/jumpstarter-dev/jumpstarter-controller + +go 1.24.0 + +require ( + filippo.io/keygen v0.0.0-20240718133620-7f162efbbd87 + github.com/gin-gonic/gin v1.10.0 + github.com/go-jose/go-jose/v4 v4.0.4 + github.com/go-logr/logr v1.4.2 + github.com/golang-jwt/jwt/v5 v5.2.1 + github.com/google/uuid v1.6.0 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 + github.com/onsi/ginkgo/v2 v2.22.2 + github.com/onsi/gomega v1.36.2 + github.com/zitadel/oidc/v3 v3.34.1 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 + golang.org/x/sync v0.12.0 + google.golang.org/grpc v1.70.0 + google.golang.org/protobuf v1.36.5 + k8s.io/api v0.33.0 + k8s.io/apimachinery v0.33.0 + k8s.io/apiserver 
v0.33.0 + k8s.io/client-go v0.33.0 + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 + sigs.k8s.io/controller-runtime v0.21.0 +) + +require ( + cel.dev/expr v0.19.1 // indirect + filippo.io/bigmod v0.0.3 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.8.0 // indirect + github.com/bytedance/sonic v1.11.6 // indirect + github.com/bytedance/sonic/loader v0.1.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect + github.com/coreos/go-oidc v2.3.0+incompatible // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-chi/chi/v5 v5.2.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.20.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof 
v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/gorilla/securecookie v1.1.2 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/muhlemmer/gu v0.3.1 // indirect + github.com/muhlemmer/httpforwarded v0.1.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pquerna/cachecontrol v0.1.0 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/zitadel/logging v0.6.1 // indirect + github.com/zitadel/schema v1.3.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/arch v0.8.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/net v0.38.0 
// indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.28.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250204164813-702378808489 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.33.0 // indirect + k8s.io/component-base v0.33.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/controller/go.sum b/controller/go.sum new file mode 100644 index 000000000..574b36313 --- /dev/null +++ b/controller/go.sum @@ -0,0 +1,338 @@ +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +filippo.io/bigmod v0.0.3 h1:qmdCFHmEMS+PRwzrW6eUrgA4Q3T8D6bRcjsypDMtWHM= +filippo.io/bigmod v0.0.3/go.mod h1:WxGvOYE0OUaBC2N112Dflb3CjOnMBuNRA2UWZc2UbPE= +filippo.io/keygen v0.0.0-20240718133620-7f162efbbd87 h1:HlcHAMbI9Xvw3aWnhPngghMl5AKE2GOvjmvSGOKzCcI= +filippo.io/keygen v0.0.0-20240718133620-7f162efbbd87/go.mod h1:nAs0+DyACEQGudhkTwlPC9atyqDYC7ZotgZR7D8OwXM= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bmatcuk/doublestar/v4 v4.8.0 h1:DSXtrypQddoug1459viM9X9D3dp1Z7993fw36I2kNcQ= +github.com/bmatcuk/doublestar/v4 v4.8.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= +github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= +github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/canonical/go-sp800.90a-drbg v0.0.0-20210314144037-6eeb1040d6c3 h1:oe6fCvaEpkhyW3qAicT0TnGtyht/UrgvOwMcEgLb7Aw= +github.com/canonical/go-sp800.90a-drbg v0.0.0-20210314144037-6eeb1040d6c3/go.mod h1:qdP0gaj0QtgX2RUZhnlVrceJ+Qln8aSlDyJwelLLFeM= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= +github.com/coreos/go-oidc v2.3.0+incompatible h1:+5vEsrgprdLjjQ9FzIKAzQz1wwPD+83hQRfUIPh7rO0= +github.com/coreos/go-oidc v2.3.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew 
v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= +github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/go-chi/chi/v5 v5.2.0 h1:Aj1EtB0qR2Rdo2dG4O94RIU35w2lvQSj6BRA4+qwFL0= +github.com/go-chi/chi/v5 v5.2.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= 
+github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= 
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= +github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 
h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/muhlemmer/gu v0.3.1 h1:7EAqmFrW7n3hETvuAdmFmn4hS8W+z3LgKtrnow+YzNM= +github.com/muhlemmer/gu v0.3.1/go.mod h1:YHtHR+gxM+bKEIIs7Hmi9sPT3ZDUvTN/i88wQpZkrdM= +github.com/muhlemmer/httpforwarded v0.1.0 h1:x4DLrzXdliq8mprgUMR0olDvHGkou5BJsK/vWUetyzY= +github.com/muhlemmer/httpforwarded v0.1.0/go.mod h1:yo9czKedo2pdZhoXe+yDkGVbU0TJ0q9oQ90BVoDEtw0= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol 
v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod 
h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y= +github.com/zitadel/logging v0.6.1/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow= +github.com/zitadel/oidc/v3 v3.34.1 h1:/rxx2HxEowd8Sdb8sxcRxTu9pLy3/TXBLrewKOUMTHA= +github.com/zitadel/oidc/v3 v3.34.1/go.mod h1:lhAdAP1iWAnpfWF8CWNiO6yKvGFtPMuAubPwP5JC7Ec= +github.com/zitadel/schema v1.3.0 h1:kQ9W9tvIwZICCKWcMvCEweXET1OcOyGEuFbHs4o5kg0= +github.com/zitadel/schema v1.3.0/go.mod h1:NptN6mkBDFvERUCvZHlvWmmME+gmZ44xzwRXwhzsbtc= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr 
v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= +golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 
h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20250204164813-702378808489 h1:fCuMM4fowGzigT89NCIsW57Pk9k2D12MMi2ODn+Nk+o= +google.golang.org/genproto/googleapis/api v0.0.0-20250204164813-702378808489/go.mod h1:iYONQfRdizDB8JJBybql13nArx91jcUk7zCXEsOofM4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489 h1:5bKytslY8ViY0Cj/ewmRtrWHW64bNF03cAatUUFCdFI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod 
h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= +gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= +k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= +k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= +k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.0 
h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= +k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk= +k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod 
h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/controller/hack/boilerplate.go.txt b/controller/hack/boilerplate.go.txt new file mode 100644 index 000000000..ff72ff2aa --- /dev/null +++ b/controller/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/controller/hack/demoenv/exporters-statefulset.yaml b/controller/hack/demoenv/exporters-statefulset.yaml new file mode 100644 index 000000000..766cebcc5 --- /dev/null +++ b/controller/hack/demoenv/exporters-statefulset.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: exporter +spec: + serviceName: exporter + replicas: 5 + selector: + matchLabels: + exporter-mock: plain + template: + metadata: + labels: + exporter-mock: plain + spec: + restartPolicy: Always + containers: + - name: jumpstarter-exporter + image: quay.io/jumpstarter-dev/jumpstarter:0.5.0 + imagePullPolicy: IfNotPresent + env: + - name: JUMPSTARTER_GRPC_INSECURE + value: "1" + # note for some reason jmp-exporter run $(cat /etc/hostname) won't find the config, neither does list + command: ["/bin/sh", "-c", "echo running exporter $(cat /etc/hostname); jmp-exporter run -c /etc/jumpstarter/exporters/$(cat /etc/hostname).yaml"] + # map the exporter-configs ConfigMap to 
/etc/jumpstarter/exporters + volumeMounts: + - name: exporter-configs + mountPath: /etc/jumpstarter/exporters + volumes: + - name: exporter-configs + configMap: + name: exporter-configs + + + diff --git a/controller/hack/demoenv/gen/.gitignore b/controller/hack/demoenv/gen/.gitignore new file mode 100644 index 000000000..1e82fc7de --- /dev/null +++ b/controller/hack/demoenv/gen/.gitignore @@ -0,0 +1 @@ +*.yaml diff --git a/controller/hack/demoenv/kustomization.yaml b/controller/hack/demoenv/kustomization.yaml new file mode 100644 index 000000000..f185a8196 --- /dev/null +++ b/controller/hack/demoenv/kustomization.yaml @@ -0,0 +1,19 @@ +namespace: jumpstarter-exporters + +resources: + - exporters-statefulset.yaml + - vcan-exporters-statefulset.yaml +configMapGenerator: +- name: exporter-configs + files: + - gen/exporter-0.yaml + - gen/exporter-1.yaml + - gen/exporter-2.yaml + - gen/exporter-3.yaml + - gen/exporter-4.yaml + - gen/vcan-exporter-0.yaml + - gen/vcan-exporter-1.yaml + - gen/vcan-exporter-2.yaml + - gen/vcan-exporter-3.yaml + - gen/vcan-exporter-4.yaml + diff --git a/controller/hack/demoenv/prepare_exporters.sh b/controller/hack/demoenv/prepare_exporters.sh new file mode 100755 index 000000000..5546e5fea --- /dev/null +++ b/controller/hack/demoenv/prepare_exporters.sh @@ -0,0 +1,67 @@ +#!/bin/sh +OUT_DIR=${OUT_DIR:-"hack/demoenv/gen"} +NAMESPACE=${NAMESPACE:-"jumpstarter-lab"} + +mkdir -p ${OUT_DIR} +for i in `seq 0 4`; do + EXPORTER_NAME="exporter-$i" + echo "Creating exporter $EXPORTER_NAME" + OUT_FILE="${OUT_DIR}/${EXPORTER_NAME}.yaml" + bin/jmpctl exporter delete "${EXPORTER_NAME}" --namespace "${NAMESPACE}" > /dev/null 2>&1 + bin/jmpctl exporter create "${EXPORTER_NAME}" --namespace "${NAMESPACE}" > "${OUT_FILE}" + cat >> "${OUT_FILE}" < /dev/null 2>&1 + bin/jmpctl exporter create "${EXPORTER_NAME}" --namespace "${NAMESPACE}" > "${OUT_FILE}" + cat >> "${OUT_FILE}" < /dev/null; then + METHOD=upgrade +fi + +echo -e "${GREEN}Performing helm ${METHOD} 
...${NC}" + +# install/update with helm +helm ${METHOD} --namespace jumpstarter-lab \ + --create-namespace \ + ${HELM_SETS} \ + --set global.timestamp=$(date +%s) \ + --values ./deploy/helm/jumpstarter/values.kind.yaml ${EXTRA_VALUES} jumpstarter \ + ./deploy/helm/jumpstarter/ + +kubectl config set-context --current --namespace=jumpstarter-lab + +# Check gRPC endpoints are ready +check_grpc_endpoints + +# Print success banner +print_deployment_success "Helm" diff --git a/controller/hack/deploy_with_operator.sh b/controller/hack/deploy_with_operator.sh new file mode 100755 index 000000000..84e5a14ea --- /dev/null +++ b/controller/hack/deploy_with_operator.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +set -exo pipefail +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" + +DEPLOY_JUMPSTARTER=${DEPLOY_JUMPSTARTER:-true} + +# Source common utilities +source "${SCRIPT_DIR}/utils" + +# Source common deployment variables +source "${SCRIPT_DIR}/deploy_vars" + +kubectl config use-context kind-jumpstarter + +# Install nginx ingress if in ingress mode +if [ "${NETWORKING_MODE}" = "ingress" ]; then + install_nginx_ingress +else + echo -e "${GREEN}Deploying with nodeport ...${NC}" +fi + +# load the container images into the kind cluster +kind_load_image "${IMG}" +kind_load_image "${OPERATOR_IMG}" + +# Deploy the operator +echo -e "${GREEN}Deploying Jumpstarter operator ...${NC}" +kubectl apply -f deploy/operator/dist/install.yaml + +# If operator deployment already exists, restart it to pick up the new image +if kubectl get deployment jumpstarter-operator-controller-manager -n jumpstarter-operator-system > /dev/null 2>&1; then + echo -e "${GREEN}Restarting operator deployment to pick up new image ...${NC}" + kubectl scale deployment jumpstarter-operator-controller-manager -n jumpstarter-operator-system --replicas=0 + kubectl wait --namespace jumpstarter-operator-system \ + --for=delete pod \ + --selector=control-plane=controller-manager \ + --timeout=60s 2>/dev/null || true + kubectl 
scale deployment jumpstarter-operator-controller-manager -n jumpstarter-operator-system --replicas=1 +fi + +# Wait for operator to be ready +echo -e "${GREEN}Waiting for operator to be ready ...${NC}" +kubectl wait --namespace jumpstarter-operator-system \ + --for=condition=available deployment/jumpstarter-operator-controller-manager \ + --timeout=120s + +if [ "${DEPLOY_JUMPSTARTER}" != "true" ]; then + echo -e "${GREEN}Skipping Jumpstarter deployment ...${NC}" + exit 0 +else + echo -e "${GREEN}Creating Jumpstarter custom resource ...${NC}" +fi + +# Create namespace for Jumpstarter deployment +echo -e "${GREEN}Creating jumpstarter-lab namespace ...${NC}" +kubectl create namespace jumpstarter-lab --dry-run=client -o yaml | kubectl apply -f - + + +# Generate endpoint configuration based on networking mode +if [ "${NETWORKING_MODE}" == "ingress" ]; then + CONTROLLER_ENDPOINT_CONFIG=$(cat <<-END + - address: grpc.${BASEDOMAIN}:5443 + ingress: + enabled: true + class: "nginx" +END +) + ROUTER_ENDPOINT_CONFIG=$(cat <<-END + - address: router.${BASEDOMAIN}:5443 + ingress: + enabled: true + class: "nginx" +END +) +else + CONTROLLER_ENDPOINT_CONFIG=$(cat <<-END + # this is exposed by a nodeport in 30010 but mapped to 8082 on the host + - address: grpc.${BASEDOMAIN}:8082 + nodeport: + enabled: true + port: 30010 +END +) + ROUTER_ENDPOINT_CONFIG=$(cat <<-END + # this is exposed by a nodeport in 30011 but mapped to 8083 on the host + - address: router.${BASEDOMAIN}:8083 + nodeport: + enabled: true + port: 30011 +END +) +fi + +# Apply the Jumpstarter CR with the appropriate endpoint configuration +cat </dev/null 1>/dev/null +if [ $? 
-eq 0 ]; then + echo "Helm already installed" + exit 0 +fi + +# Get the remote shell script and make sure it's the one we expect, inside the script there is also +# verification of the downloaded binaries +curl -fsSL -o /tmp/get_helm.sh https://raw.githubusercontent.com/helm/helm/0d0f91d1ce277b2c8766cdc4c7aa04dbafbf2503/scripts/get-helm-3 +echo "6701e269a95eec0a5f67067f504f43ad94e9b4a52ec1205d26b3973d6f5cb3dc /tmp/get_helm.sh" | sha256sum --check || exit 1 +chmod a+x /tmp/get_helm.sh +/tmp/get_helm.sh + +rm /tmp/get_helm.sh diff --git a/controller/hack/kind_cluster.yaml b/controller/hack/kind_cluster.yaml new file mode 100644 index 000000000..9cdf74eef --- /dev/null +++ b/controller/hack/kind_cluster.yaml @@ -0,0 +1,41 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + "service-node-port-range": "3000-32767" +- | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" +nodes: +- role: control-plane + extraPortMappings: + - containerPort: 80 # ingress controller + hostPort: 5080 + protocol: TCP + - containerPort: 30010 # grpc nodeport + hostPort: 8082 + protocol: TCP + - containerPort: 30011 # grpc router nodeport + hostPort: 8083 + protocol: TCP + - containerPort: 30012 # grpc router nodeport + hostPort: 8084 + protocol: TCP + - containerPort: 30013 # grpc router nodeport + hostPort: 8085 + protocol: TCP + - containerPort: 32000 # dex nodeport + hostPort: 5556 + protocol: TCP + - containerPort: 443 + hostPort: 5443 + protocol: TCP +# if we needed to mount a hostPath volume into the kind cluster, we can do it like this +# extraMounts: +# - hostPath: ./bin/e2e-certs +# containerPath: /tmp/e2e-certs diff --git a/controller/hack/setup_kind_rootless.sh b/controller/hack/setup_kind_rootless.sh new file mode 100755 index 000000000..cd743f392 --- /dev/null +++ b/controller/hack/setup_kind_rootless.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + + 
+if which systemctl; then + + if [ -f /etc/systemd/system/user@.service.d/delegate.conf ]; then + echo "Kind systemd rootless already configured" && exit 0 + else + echo "Configuring Kind for rootless operation in Linux" + # Enable rootless Kind, see https://kind.sigs.k8s.io/docs/user/rootless/ + sudo mkdir -p /etc/systemd/system/user@.service.d + cat << EOF | sudo tee /etc/systemd/system/user@.service.d/delegate.conf > /dev/null +[Service] +Delegate=yes +EOF + + sudo systemctl daemon-reload + fi +fi + diff --git a/controller/hack/utils b/controller/hack/utils new file mode 100755 index 000000000..03ad95642 --- /dev/null +++ b/controller/hack/utils @@ -0,0 +1,172 @@ +#!/usr/bin/env bash +# Common utilities for Jumpstarter hack scripts + +set -eo pipefail + +# Script directory calculation helper +# Usage: SCRIPT_DIR="$(get_script_dir)" +get_script_dir() { + dirname "$(readlink -f "$0")" +} + +# Environment variable defaults +export KIND=${KIND:-bin/kind} +export GRPCURL=${GRPCURL:-bin/grpcurl} +export NETWORKING_MODE=${NETWORKING_MODE:-nodeport} + +# Color codes for terminal output +export GREEN='\033[0;32m' +export NC='\033[0m' # No Color + +# Get external IP address +# Returns the IP address used for outbound connections +get_external_ip() { + if which ip 2>/dev/null 1>/dev/null; then + ip route get 1.1.1.1 | grep -oP 'src \K\S+' + else + # MacOS does not have ip, so we use route and ifconfig instead + INTERFACE=$(route get 1.1.1.1 | grep interface | awk '{print $2}') + ifconfig | grep "$INTERFACE" -A 10 | grep "inet " | grep -Fv 127.0.0.1 | awk '{print $2}' | head -n 1 + fi +} + +# Load Docker/Podman image into kind cluster +# Function to save images to kind, with workaround for github CI and other environment issues +# In github CI, kind gets confused and tries to pull the image from docker instead +# of podman, so if regular docker-image fails we need to: +# * save it to OCI image format +# * then load it into kind +# Args: +# $1: image name (e.g., 
quay.io/jumpstarter-dev/jumpstarter-controller:latest) +# $2: kind cluster name (default: jumpstarter) +kind_load_image() { + local image=$1 + local cluster_name=${2:-jumpstarter} + + echo -e "${GREEN}Loading $1 in kind ...${NC}" + + # First, try to load the image directly + if ${KIND} load docker-image "${image}" --name "${cluster_name}" 2>/dev/null; then + echo "Image ${image} loaded successfully." + return + fi + + # Save to tar file + if podman save "${image}" | ${KIND} load image-archive /dev/stdin --name "${cluster_name}"; then + echo "Image loaded successfully." + else + echo "Error loading image ${image}." + exit 1 + fi +} + +# Install nginx ingress in kind cluster +# This function deploys nginx ingress and waits for it to be ready +install_nginx_ingress() { + echo -e "${GREEN}Deploying nginx ingress in kind ...${NC}" + + lsmod | grep ip_tables || \ + (echo "ip_tables module not loaded needed by nginx ingress, please run 'sudo modprobe ip_tables'" && exit 1) + +kubectl apply -f https://github.com/kubernetes/ingress-nginx/raw/refs/heads/main/deploy/static/provider/kind/deploy.yaml + + echo -e "${GREEN}Waiting for nginx to be ready ...${NC}" + + + kubectl wait --namespace ingress-nginx \ + --for=condition=available deployment/ingress-nginx-controller \ + --timeout=180s + + echo -e "${GREEN}Nginx ingress installed successfully${NC}" +} + +# Wait for Jumpstarter Kubernetes resources to be created and ready +# This is primarily used for operator deployments where resources are created asynchronously +# Args: +# $1: namespace (default: jumpstarter-lab) +wait_for_jumpstarter_resources() { + local namespace=${1:-jumpstarter-lab} + + echo -e "${GREEN}Waiting for Jumpstarter deployments to be ready ...${NC}" + + # Wait for controller deployment to exist + echo -e "${GREEN} * Waiting for controller deployment to be created ...${NC}" + local timeout=60 + while ! 
kubectl get deployment jumpstarter-controller -n "${namespace}" > /dev/null 2>&1; do + sleep 2 + timeout=$((timeout - 2)) + if [ ${timeout} -le 0 ]; then + echo -e "${GREEN} * Controller deployment not created after 60s, exiting ...${NC}" + exit 1 + fi + done + + # Wait for router deployment to exist + echo -e "${GREEN} * Waiting for router deployment to be created ...${NC}" + timeout=60 + while ! kubectl get deployment jumpstarter-router-0 -n "${namespace}" > /dev/null 2>&1; do + sleep 2 + timeout=$((timeout - 2)) + if [ ${timeout} -le 0 ]; then + echo -e "${GREEN} * Router deployment not created after 60s, exiting ...${NC}" + exit 1 + fi + done + + # Wait for controller deployment to be ready + echo -e "${GREEN} * Waiting for controller deployment to be ready ...${NC}" + kubectl wait --namespace "${namespace}" \ + --for=condition=available deployment/jumpstarter-controller \ + --timeout=180s + + # Wait for router statefulset to be ready + echo -e "${GREEN} * Waiting for router pods to be ready ...${NC}" + kubectl wait --namespace "${namespace}" \ + --for=condition=ready pod \ + --selector=app=jumpstarter-router-0 \ + --timeout=180s +} + +# Wait for gRPC endpoint to be ready +# Args: +# $1: endpoint (e.g., grpc.jumpstarter.192.168.1.1.nip.io:8082) +# $2: timeout in seconds (default: 120) +wait_for_grpc_endpoint() { + local endpoint=$1 + local timeout=${2:-120} + local retries=$((timeout / 2)) + + echo -e "${GREEN} * Checking ${endpoint} ... ${NC}" + while ! ${GRPCURL} -insecure "${endpoint}" list; do + sleep 2 + retries=$((retries - 1)) + if [ ${retries} -eq 0 ]; then + echo -e "${GREEN} * ${endpoint} not ready after ${timeout}s, exiting ... 
${NC}" + exit 1 + fi + done +} + +# Check both gRPC endpoints (controller and router) are ready +check_grpc_endpoints() { + echo -e "${GREEN}Waiting for grpc endpoints to be ready:${NC}" + wait_for_grpc_endpoint "${GRPC_ENDPOINT}" + wait_for_grpc_endpoint "${GRPC_ROUTER_ENDPOINT}" +} + +# Print deployment success banner +# Args: +# $1: deployment method (e.g., "Helm", "operator") - optional +print_deployment_success() { + local method=${1:-""} + local method_text="" + + if [ -n "${method}" ]; then + method_text=" via ${method}" + fi + + echo -e "${GREEN}Jumpstarter controller deployed successfully${method_text}!${NC}" + echo -e " gRPC endpoint: ${GRPC_ENDPOINT}" + echo -e " gRPC router endpoint: ${GRPC_ROUTER_ENDPOINT}" +} + diff --git a/controller/internal/authentication/bearer.go b/controller/internal/authentication/bearer.go new file mode 100644 index 000000000..eb2765896 --- /dev/null +++ b/controller/internal/authentication/bearer.go @@ -0,0 +1,62 @@ +package authentication + +import ( + "context" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "k8s.io/apiserver/pkg/authentication/authenticator" +) + +var _ = ContextAuthenticator(&BearerTokenAuthenticator{}) + +type BearerTokenAuthenticator struct { + auth authenticator.Token +} + +func NewBearerTokenAuthenticator(auth authenticator.Token) *BearerTokenAuthenticator { + return &BearerTokenAuthenticator{auth: auth} +} + +func (b *BearerTokenAuthenticator) AuthenticateContext(ctx context.Context) (*authenticator.Response, bool, error) { + token, err := BearerTokenFromContext(ctx) + if err != nil { + return nil, false, err + } + + return b.auth.AuthenticateToken(ctx, token) +} + +func BearerTokenFromContext(ctx context.Context) (string, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", status.Errorf(codes.InvalidArgument, "missing metadata") + } + + authorizations := md.Get("authorization") + + if 
len(authorizations) < 1 { + return "", status.Errorf(codes.Unauthenticated, "missing authorization header") + } + + // Reference: https://www.rfc-editor.org/rfc/rfc7230#section-3.2.2 + // A sender MUST NOT generate multiple header fields with the same field name in a message + if len(authorizations) > 1 { + return "", status.Errorf(codes.InvalidArgument, "multiple authorization headers") + } + + // Invariant: len(authorizations) == 1 + authorization := authorizations[0] + + // Reference: https://github.com/golang-jwt/jwt/blob/62e504c2/request/extractor.go#L93 + if len(authorization) < 7 || !strings.EqualFold(authorization[:7], "Bearer ") { + return "", status.Errorf(codes.InvalidArgument, "malformed authorization header") + } + + // Invariant: len(authorization) >= 7 + token := authorization[7:] + + return token, nil +} diff --git a/controller/internal/authentication/types.go b/controller/internal/authentication/types.go new file mode 100644 index 000000000..08632943d --- /dev/null +++ b/controller/internal/authentication/types.go @@ -0,0 +1,11 @@ +package authentication + +import ( + "context" + + "k8s.io/apiserver/pkg/authentication/authenticator" +) + +type ContextAuthenticator interface { + AuthenticateContext(context.Context) (*authenticator.Response, bool, error) +} diff --git a/controller/internal/authorization/basic.go b/controller/internal/authorization/basic.go new file mode 100644 index 000000000..c94015c04 --- /dev/null +++ b/controller/internal/authorization/basic.go @@ -0,0 +1,80 @@ +package authorization + +import ( + "context" + "slices" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type BasicAuthorizer struct { + client client.Client + prefix string + provisioning bool +} + +func 
NewBasicAuthorizer(client client.Client, prefix string, provisioning bool) authorizer.Authorizer { + return &BasicAuthorizer{ + client: client, + prefix: prefix, + provisioning: provisioning, + } +} + +func (b *BasicAuthorizer) Authorize( + ctx context.Context, + attributes authorizer.Attributes, +) (authorizer.Decision, string, error) { + switch attributes.GetResource() { + case "Exporter": + var e jumpstarterdevv1alpha1.Exporter + if err := b.client.Get(ctx, client.ObjectKey{ + Namespace: attributes.GetNamespace(), + Name: attributes.GetName(), + }, &e); err != nil { + return authorizer.DecisionDeny, "failed to get exporter", err + } + if slices.Contains(e.Usernames(b.prefix), attributes.GetUser().GetName()) { + return authorizer.DecisionAllow, "", nil + } else { + return authorizer.DecisionDeny, "", nil + } + case "Client": + var c jumpstarterdevv1alpha1.Client + err := b.client.Get(ctx, client.ObjectKey{ + Namespace: attributes.GetNamespace(), + Name: attributes.GetName(), + }, &c) + if err != nil { + if apierrors.IsNotFound(err) && b.provisioning { + c = jumpstarterdevv1alpha1.Client{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: attributes.GetNamespace(), + Name: attributes.GetName(), + }, + Spec: jumpstarterdevv1alpha1.ClientSpec{ + Username: ptr.To(attributes.GetUser().GetName()), + }, + } + if err := b.client.Create(ctx, &c); err != nil { + return authorizer.DecisionDeny, "failed to provision client", err + } + } else { + return authorizer.DecisionDeny, "failed to get client", err + } + } + + if slices.Contains(c.Usernames(b.prefix), attributes.GetUser().GetName()) { + return authorizer.DecisionAllow, "", nil + } else { + return authorizer.DecisionDeny, "", nil + } + default: + return authorizer.DecisionDeny, "invalid object kind", nil + } +} diff --git a/controller/internal/authorization/metadata.go b/controller/internal/authorization/metadata.go new file mode 100644 index 000000000..39d6db9d8 --- /dev/null +++ 
b/controller/internal/authorization/metadata.go @@ -0,0 +1,105 @@ +package authorization + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "regexp" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +var _ = ContextAttributesGetter(&MetadataAttributesGetter{}) + +var ( + invalidChar = regexp.MustCompile("[^-a-zA-Z0-9]") + multipleHyphen = regexp.MustCompile("-+") + surroundingHyphen = regexp.MustCompile("^-|-$") +) + +type MetadataAttributesGetterConfig struct { + NamespaceKey string + ResourceKey string + NameKey string +} + +type MetadataAttributesGetter struct { + config MetadataAttributesGetterConfig +} + +func NewMetadataAttributesGetter(config MetadataAttributesGetterConfig) *MetadataAttributesGetter { + return &MetadataAttributesGetter{ + config: config, + } +} + +func normalizeName(name string) string { + hash := sha256.Sum256([]byte(name)) + + sanitized := strings.ToLower(name) + sanitized = invalidChar.ReplaceAllString(sanitized, "-") + sanitized = multipleHyphen.ReplaceAllString(sanitized, "-") + sanitized = surroundingHyphen.ReplaceAllString(sanitized, "") + + if len(sanitized) > 37 { + sanitized = sanitized[:37] + } + + return strings.Join([]string{ + "oidc", + sanitized, + hex.EncodeToString(hash[:3]), + }, "-") +} + +func (b *MetadataAttributesGetter) ContextAttributes( + ctx context.Context, + userInfo user.Info, +) (authorizer.Attributes, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.InvalidArgument, "missing metadata") + } + + namespace, err := mdGet(md, b.config.NamespaceKey) + if err != nil { + return nil, err + } + + resource, err := mdGet(md, b.config.ResourceKey) + if err != nil { + return nil, err + } + + name, err := mdGet(md, b.config.NameKey) + if err != nil { + return nil, err + } + + if name == "" { + name = 
normalizeName(userInfo.GetName()) + } + + return authorizer.AttributesRecord{ + User: userInfo, + Namespace: namespace, + Resource: resource, + Name: name, + }, nil +} + +func mdGet(md metadata.MD, k string) (string, error) { + v := md.Get(k) + if len(v) < 1 { + return "", status.Errorf(codes.InvalidArgument, "missing metadata: %s", k) + } + if len(v) > 1 { + return "", status.Errorf(codes.InvalidArgument, "multiple metadata: %s", k) + } + return v[0], nil +} diff --git a/controller/internal/authorization/metadata_test.go b/controller/internal/authorization/metadata_test.go new file mode 100644 index 000000000..387df2b0f --- /dev/null +++ b/controller/internal/authorization/metadata_test.go @@ -0,0 +1,47 @@ +package authorization + +import ( + "strings" + "testing" + + "k8s.io/apimachinery/pkg/util/validation" +) + +func TestNormalizeName(t *testing.T) { + testcases := []struct { + input string + output string + }{ + { + input: "foo", + output: "oidc-foo-2c26b4", + }, + { + input: "foo@example.com", + output: "oidc-foo-example-com-321ba1", + }, + { + input: "foo@@@@@example.com", + output: "oidc-foo-example-com-5ac340", + }, + { + input: "@foo@example.com@", + output: "oidc-foo-example-com-5be6ea", + }, + { + input: strings.Repeat("foo", 30), + output: "oidc-foofoofoofoofoofoofoofoofoofoofoofoof-4ac4a7", + }, + } + for _, testcase := range testcases { + result := normalizeName(testcase.input) + if validation.IsDNS1123Subdomain(result) != nil { + t.Errorf("normalizing the name %s does not produce a valid RFC1123 subdomain, but %s", + testcase.input, result) + } + if result != testcase.output { + t.Errorf("normalizing the name %s does not produce the expected output %s, but %s", + testcase.input, testcase.output, result) + } + } +} diff --git a/controller/internal/authorization/types.go b/controller/internal/authorization/types.go new file mode 100644 index 000000000..7ee05a586 --- /dev/null +++ b/controller/internal/authorization/types.go @@ -0,0 +1,12 @@ +package 
authorization + +import ( + "context" + + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +type ContextAttributesGetter interface { + ContextAttributes(context.Context, user.Info) (authorizer.Attributes, error) +} diff --git a/controller/internal/config/config.go b/controller/internal/config/config.go new file mode 100644 index 000000000..2fbb58746 --- /dev/null +++ b/controller/internal/config/config.go @@ -0,0 +1,118 @@ +package config + +import ( + "context" + "fmt" + "time" + + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apiserver/pkg/authentication/authenticator" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func LoadRouterConfiguration( + ctx context.Context, + client client.Reader, + key client.ObjectKey, +) (grpc.ServerOption, error) { + var configmap corev1.ConfigMap + if err := client.Get(ctx, key, &configmap); err != nil { + return nil, err + } + + rawConfig, ok := configmap.Data["config"] + if !ok { + return nil, fmt.Errorf("LoadRouterConfiguration: missing config section") + } + + var config Config + err := yaml.UnmarshalStrict([]byte(rawConfig), &config) + if err != nil { + return nil, err + } + + serverOptions, err := LoadGrpcConfiguration(config.Grpc) + if err != nil { + return nil, err + } + + return serverOptions, nil +} + +func LoadConfiguration( + ctx context.Context, + client client.Reader, + scheme *runtime.Scheme, + key client.ObjectKey, + signer *oidc.Signer, + certificateAuthority string, +) (authenticator.Token, string, Router, grpc.ServerOption, *Provisioning, error) { + var configmap corev1.ConfigMap + if err := client.Get(ctx, key, &configmap); err != nil { + return nil, "", nil, nil, nil, err + } + + rawRouter, ok := configmap.Data["router"] + if !ok { + return nil, "", nil, nil, nil, 
fmt.Errorf("LoadConfiguration: missing router section") + } + + var router Router + if err := yaml.Unmarshal([]byte(rawRouter), &router); err != nil { + return nil, "", nil, nil, nil, err + } + + rawAuthenticationConfiguration, ok := configmap.Data["authentication"] + if ok { + // backwards compatibility + // TODO: remove in 0.7.0 + authenticator, prefix, err := oidc.LoadAuthenticationConfiguration( + ctx, + scheme, + []byte(rawAuthenticationConfiguration), + signer, + certificateAuthority, + ) + if err != nil { + return nil, "", nil, nil, nil, err + } + + return authenticator, prefix, router, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: 1 * time.Second, + PermitWithoutStream: true, + }), &Provisioning{Enabled: false}, nil + } + + rawConfig, ok := configmap.Data["config"] + if !ok { + return nil, "", nil, nil, nil, fmt.Errorf("LoadConfiguration: missing config section") + } + + var config Config + if err := yaml.UnmarshalStrict([]byte(rawConfig), &config); err != nil { + return nil, "", nil, nil, nil, err + } + + authenticator, prefix, err := LoadAuthenticationConfiguration( + ctx, + scheme, + config.Authentication, + signer, + certificateAuthority, + ) + if err != nil { + return nil, "", nil, nil, nil, err + } + + serverOptions, err := LoadGrpcConfiguration(config.Grpc) + if err != nil { + return nil, "", nil, nil, nil, err + } + + return authenticator, prefix, router, serverOptions, &config.Provisioning, nil +} diff --git a/controller/internal/config/grpc.go b/controller/internal/config/grpc.go new file mode 100644 index 000000000..be17f2cf4 --- /dev/null +++ b/controller/internal/config/grpc.go @@ -0,0 +1,31 @@ +package config + +import ( + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +// LoadGrpcConfiguration loads the gRPC server configuration from the parsed Config struct. +// It creates a gRPC server option with keepalive enforcement policy configured. 
+func LoadGrpcConfiguration(config Grpc) (grpc.ServerOption, error) { + ka := config.Keepalive + + // Parse MinTime with default of 1s + minTime, err := ParseDuration(ka.MinTime) + if err != nil { + return nil, fmt.Errorf("failed to parse keepalive minTime: %w", err) + } + if minTime == 0 { + minTime = 1e9 // 1 second default + } + + // Create the keepalive enforcement policy + policy := keepalive.EnforcementPolicy{ + MinTime: minTime, + PermitWithoutStream: ka.PermitWithoutStream, + } + + return grpc.KeepaliveEnforcementPolicy(policy), nil +} diff --git a/controller/internal/config/oidc.go b/controller/internal/config/oidc.go new file mode 100644 index 000000000..3a79e9077 --- /dev/null +++ b/controller/internal/config/oidc.go @@ -0,0 +1,96 @@ +package config + +import ( + "context" + "os" + + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/apis/apiserver" + apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" + "k8s.io/apiserver/pkg/authentication/authenticator" + tokenunion "k8s.io/apiserver/pkg/authentication/token/union" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + koidc "k8s.io/apiserver/plugin/pkg/authenticator/token/oidc" +) + +func LoadAuthenticationConfiguration( + ctx context.Context, + scheme *runtime.Scheme, + config Authentication, + signer *oidc.Signer, + certificateAuthority string, +) (authenticator.Token, string, error) { + if config.Internal.Prefix == "" { + config.Internal.Prefix = "internal:" + } + + config.JWT = append(config.JWT, apiserverv1beta1.JWTAuthenticator{ + Issuer: apiserverv1beta1.Issuer{ + URL: signer.Issuer(), + CertificateAuthority: certificateAuthority, + Audiences: []string{signer.Audience()}, + }, + ClaimMappings: apiserverv1beta1.ClaimMappings{ + Username: apiserverv1beta1.PrefixedClaimOrExpression{ + Claim: "sub", + Prefix: &config.Internal.Prefix, + }, + }, + }) + + authn, err := newJWTAuthenticator( + ctx, + scheme, + config, + 
) + if err != nil { + return nil, "", err + } + + return authn, config.Internal.Prefix, nil +} + +// Reference: https://github.com/kubernetes/kubernetes/blob/v1.32.1/pkg/kubeapiserver/authenticator/config.go#L244 +func newJWTAuthenticator( + ctx context.Context, + scheme *runtime.Scheme, + config Authentication, +) (authenticator.Token, error) { + var jwtAuthenticators []authenticator.Token + for _, jwtAuthenticator := range config.JWT { + var oidcCAContent koidc.CAContentProvider + if len(jwtAuthenticator.Issuer.CertificateAuthority) > 0 { + var oidcCAError error + if _, err := os.Stat(jwtAuthenticator.Issuer.CertificateAuthority); err == nil { + oidcCAContent, oidcCAError = dynamiccertificates.NewDynamicCAContentFromFile( + "oidc-authenticator", + jwtAuthenticator.Issuer.CertificateAuthority, + ) + jwtAuthenticator.Issuer.CertificateAuthority = "" + } else { + oidcCAContent, oidcCAError = dynamiccertificates.NewStaticCAContent( + "oidc-authenticator", + []byte(jwtAuthenticator.Issuer.CertificateAuthority), + ) + } + if oidcCAError != nil { + return nil, oidcCAError + } + } + var jwtAuthenticatorUnversioned apiserver.JWTAuthenticator + if err := scheme.Convert(&jwtAuthenticator, &jwtAuthenticatorUnversioned, nil); err != nil { + return nil, err + } + oidcAuth, err := koidc.New(ctx, koidc.Options{ + JWTAuthenticator: jwtAuthenticatorUnversioned, + CAContentProvider: oidcCAContent, + SupportedSigningAlgs: koidc.AllValidSigningAlgorithms(), + }) + if err != nil { + return nil, err + } + jwtAuthenticators = append(jwtAuthenticators, oidcAuth) + } + return tokenunion.NewFailOnError(jwtAuthenticators...), nil +} diff --git a/controller/internal/config/types.go b/controller/internal/config/types.go new file mode 100644 index 000000000..805a022e0 --- /dev/null +++ b/controller/internal/config/types.go @@ -0,0 +1,105 @@ +package config + +import ( + "time" + + apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" +) + +// Config represents the main controller 
configuration structure. +// This matches the YAML structure in the ConfigMap's "config" key. +type Config struct { + Authentication Authentication `json:"authentication" yaml:"authentication"` + Provisioning Provisioning `json:"provisioning" yaml:"provisioning"` + Grpc Grpc `json:"grpc" yaml:"grpc"` +} + +// Authentication defines the authentication configuration for the controller. +// Supports multiple authentication methods: internal tokens, Kubernetes tokens, and JWT. +type Authentication struct { + Internal Internal `json:"internal" yaml:"internal"` + K8s K8s `json:"k8s,omitempty" yaml:"k8s,omitempty"` + JWT []apiserverv1beta1.JWTAuthenticator `json:"jwt" yaml:"jwt"` +} + +// Internal defines the internal token authentication configuration. +type Internal struct { + // Prefix to add to the subject claim of issued tokens (e.g., "internal:") + Prefix string `json:"prefix" yaml:"prefix"` + + // TokenLifetime defines how long issued tokens are valid. + // Parsed as a Go duration string (e.g., "43800h", "30d"). + TokenLifetime string `json:"tokenLifetime,omitempty" yaml:"tokenLifetime,omitempty"` +} + +// K8s defines the Kubernetes service account token authentication configuration. +type K8s struct { + // Enabled indicates whether Kubernetes authentication is enabled. + Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` +} + +// Provisioning defines the provisioning configuration. +type Provisioning struct { + Enabled bool `json:"enabled" yaml:"enabled"` +} + +// Grpc defines the gRPC server configuration. +type Grpc struct { + Keepalive Keepalive `json:"keepalive" yaml:"keepalive"` +} + +// Keepalive defines the gRPC keepalive configuration. +// All duration fields are parsed as Go duration strings (e.g., "1s", "10s", "180s"). +type Keepalive struct { + // MinTime is the minimum time between keepalives that the server will accept. 
+ // Default: "1s" + MinTime string `json:"minTime,omitempty" yaml:"minTime,omitempty"` + + // PermitWithoutStream allows keepalive pings even when there are no active streams. + // Default: true + PermitWithoutStream bool `json:"permitWithoutStream,omitempty" yaml:"permitWithoutStream,omitempty"` + + // Timeout is the duration to wait for a keepalive ping acknowledgment. + // Default: "180s" + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` + + // IntervalTime is the duration between keepalive pings. + // Default: "10s" + IntervalTime string `json:"intervalTime,omitempty" yaml:"intervalTime,omitempty"` + + // MaxConnectionIdle is the maximum duration a connection can be idle before being closed. + // Default: infinity (not set) + MaxConnectionIdle string `json:"maxConnectionIdle,omitempty" yaml:"maxConnectionIdle,omitempty"` + + // MaxConnectionAge is the maximum age of a connection before it is closed. + // Default: infinity (not set) + MaxConnectionAge string `json:"maxConnectionAge,omitempty" yaml:"maxConnectionAge,omitempty"` + + // MaxConnectionAgeGrace is the grace period for closing connections that exceed MaxConnectionAge. + // Default: infinity (not set) + MaxConnectionAgeGrace string `json:"maxConnectionAgeGrace,omitempty" yaml:"maxConnectionAgeGrace,omitempty"` +} + +// Router represents the router configuration mapping. +// This is a map where keys are router names (e.g., "default", "router-1", "router-2") +// and values are RouterEntry structs containing endpoint and label information. +// This matches the YAML structure in the ConfigMap's "router" key. +type Router map[string]RouterEntry + +// RouterEntry defines a single router endpoint configuration. +type RouterEntry struct { + // Endpoint is the router's gRPC endpoint address (e.g., "router-0.example.com:443") + Endpoint string `json:"endpoint" yaml:"endpoint"` + + // Labels are optional labels to associate with this router entry. 
+ // Used to distinguish between different router instances. + Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` +} + +// ParseDuration is a helper to parse duration strings with better error messages. +func ParseDuration(s string) (time.Duration, error) { + if s == "" { + return 0, nil + } + return time.ParseDuration(s) +} diff --git a/controller/internal/config/types_test.go b/controller/internal/config/types_test.go new file mode 100644 index 000000000..a36e8ce30 --- /dev/null +++ b/controller/internal/config/types_test.go @@ -0,0 +1,206 @@ +package config + +import ( + "testing" + + apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" + "sigs.k8s.io/yaml" +) + +func TestConfigRoundTrip(t *testing.T) { + // Create a config struct + original := Config{ + Authentication: Authentication{ + Internal: Internal{ + Prefix: "internal:", + TokenLifetime: "43800h", + }, + K8s: K8s{ + Enabled: true, + }, + JWT: []apiserverv1beta1.JWTAuthenticator{}, // Empty array + }, + Provisioning: Provisioning{ + Enabled: false, + }, + Grpc: Grpc{ + Keepalive: Keepalive{ + MinTime: "1s", + PermitWithoutStream: true, + Timeout: "180s", + IntervalTime: "10s", + }, + }, + } + + // Marshal to YAML + yamlData, err := yaml.Marshal(original) + if err != nil { + t.Fatalf("Failed to marshal config: %v", err) + } + + // Unmarshal back to struct + var parsed Config + err = yaml.Unmarshal(yamlData, &parsed) + if err != nil { + t.Fatalf("Failed to unmarshal config: %v", err) + } + + // Verify key fields + if parsed.Authentication.Internal.Prefix != original.Authentication.Internal.Prefix { + t.Errorf("Internal prefix mismatch: got %s, want %s", + parsed.Authentication.Internal.Prefix, original.Authentication.Internal.Prefix) + } + + if parsed.Grpc.Keepalive.MinTime != original.Grpc.Keepalive.MinTime { + t.Errorf("Keepalive minTime mismatch: got %s, want %s", + parsed.Grpc.Keepalive.MinTime, original.Grpc.Keepalive.MinTime) + } + + if 
parsed.Grpc.Keepalive.PermitWithoutStream != original.Grpc.Keepalive.PermitWithoutStream { + t.Errorf("Keepalive permitWithoutStream mismatch: got %v, want %v", + parsed.Grpc.Keepalive.PermitWithoutStream, original.Grpc.Keepalive.PermitWithoutStream) + } +} + +func TestRouterRoundTrip(t *testing.T) { + // Create a router config + original := Router{ + "default": RouterEntry{ + Endpoint: "router-0.example.com:443", + }, + "router-1": RouterEntry{ + Endpoint: "router-1.example.com:443", + Labels: map[string]string{ + "router-index": "1", + }, + }, + "router-2": RouterEntry{ + Endpoint: "router-2.example.com:443", + Labels: map[string]string{ + "router-index": "2", + "zone": "us-east", + }, + }, + } + + // Marshal to YAML + yamlData, err := yaml.Marshal(original) + if err != nil { + t.Fatalf("Failed to marshal router: %v", err) + } + + t.Logf("Generated YAML:\n%s", string(yamlData)) + + // Unmarshal back to struct + var parsed Router + err = yaml.Unmarshal(yamlData, &parsed) + if err != nil { + t.Fatalf("Failed to unmarshal router: %v", err) + } + + // Verify all routers exist + if len(parsed) != len(original) { + t.Errorf("Router count mismatch: got %d, want %d", len(parsed), len(original)) + } + + // Verify default router + if entry, exists := parsed["default"]; !exists { + t.Error("Missing 'default' router") + } else if entry.Endpoint != original["default"].Endpoint { + t.Errorf("Default router endpoint mismatch: got %s, want %s", + entry.Endpoint, original["default"].Endpoint) + } + + // Verify router-1 + if entry, exists := parsed["router-1"]; !exists { + t.Error("Missing 'router-1' router") + } else { + if entry.Endpoint != original["router-1"].Endpoint { + t.Errorf("Router-1 endpoint mismatch: got %s, want %s", + entry.Endpoint, original["router-1"].Endpoint) + } + if entry.Labels["router-index"] != "1" { + t.Errorf("Router-1 index label mismatch: got %s, want 1", + entry.Labels["router-index"]) + } + } + + // Verify router-2 labels + if entry, exists := 
parsed["router-2"]; !exists { + t.Error("Missing 'router-2' router") + } else { + if len(entry.Labels) != 2 { + t.Errorf("Router-2 label count mismatch: got %d, want 2", len(entry.Labels)) + } + } +} + +func TestParseYAMLToRouter(t *testing.T) { + // Test parsing actual YAML string (like from ConfigMap) + yamlInput := ` +default: + endpoint: router.example.com:443 +router-1: + endpoint: router-1.example.com:443 + labels: + router-index: "1" +router-2: + endpoint: router-2.example.com:443 + labels: + router-index: "2" +` + + var router Router + err := yaml.Unmarshal([]byte(yamlInput), &router) + if err != nil { + t.Fatalf("Failed to unmarshal YAML: %v", err) + } + + // Verify structure + if len(router) != 3 { + t.Errorf("Expected 3 routers, got %d", len(router)) + } + + // Verify default has no labels + if defaultEntry, exists := router["default"]; exists { + if len(defaultEntry.Labels) != 0 { + t.Errorf("Default router should have no labels, got %d", len(defaultEntry.Labels)) + } + } + + // Verify router-1 has labels + if router1, exists := router["router-1"]; exists { + if len(router1.Labels) == 0 { + t.Error("Router-1 should have labels") + } + } +} + +func TestParseDuration(t *testing.T) { + tests := []struct { + input string + wantErr bool + expected string + }{ + {"1s", false, "1s"}, + {"10s", false, "10s"}, + {"1m", false, "1m0s"}, + {"1h", false, "1h0m0s"}, + {"", false, "0s"}, // empty string returns 0 + {"invalid", true, ""}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + duration, err := ParseDuration(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("ParseDuration(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr) + return + } + if !tt.wantErr && duration.String() != tt.expected { + t.Errorf("ParseDuration(%q) = %v, want %v", tt.input, duration, tt.expected) + } + }) + } +} diff --git a/controller/internal/controller/client_controller.go b/controller/internal/controller/client_controller.go new file mode 100644 
index 000000000..d1e7a564e --- /dev/null +++ b/controller/internal/controller/client_controller.go @@ -0,0 +1,112 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + kclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" +) + +// ClientReconciler reconciles a Client object +type ClientReconciler struct { + kclient.Client + Scheme *runtime.Scheme + Signer *oidc.Signer +} + +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=clients,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=clients/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=clients/finalizers,verbs=update + +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.2/pkg/reconcile +func (r *ClientReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + var client jumpstarterdevv1alpha1.Client + if err := r.Get(ctx, req.NamespacedName, &client); err != nil { + return ctrl.Result{}, kclient.IgnoreNotFound( + fmt.Errorf("Reconcile: 
failed to get client: %w", err), + ) + } + + original := kclient.MergeFrom(client.DeepCopy()) + + if err := r.reconcileStatusCredential(ctx, &client); err != nil { + return ctrl.Result{}, err + } + + if err := r.reconcileStatusEndpoint(ctx, &client); err != nil { + return ctrl.Result{}, err + } + + if err := r.Status().Patch(ctx, &client, original); err != nil { + return RequeueConflict(logger, ctrl.Result{}, err) + } + + return ctrl.Result{}, nil +} + +func (r *ClientReconciler) reconcileStatusCredential( + ctx context.Context, + client *jumpstarterdevv1alpha1.Client, +) error { + secret, err := ensureSecret(ctx, kclient.ObjectKey{ + Name: client.Name + "-client", + Namespace: client.Namespace, + }, r.Client, r.Scheme, r.Signer, client.InternalSubject(), client) + if err != nil { + return fmt.Errorf("reconcileStatusCredential: failed to prepare credential for client: %w", err) + } + client.Status.Credential = &corev1.LocalObjectReference{ + Name: secret.Name, + } + return nil +} + +// nolint:unparam +func (r *ClientReconciler) reconcileStatusEndpoint( + ctx context.Context, + client *jumpstarterdevv1alpha1.Client, +) error { + logger := log.FromContext(ctx) + + endpoint := controllerEndpoint() + if client.Status.Endpoint != endpoint { + logger.Info("reconcileStatusEndpoint: updating controller endpoint") + client.Status.Endpoint = endpoint + } + + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ClientReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&jumpstarterdevv1alpha1.Client{}). + Owns(&corev1.Secret{}). + Complete(r) +} diff --git a/controller/internal/controller/client_controller_test.go b/controller/internal/controller/client_controller_test.go new file mode 100644 index 000000000..e704f7055 --- /dev/null +++ b/controller/internal/controller/client_controller_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2024. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" +) + +var _ = Describe("Identity Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + client := &jumpstarterdevv1alpha1.Client{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Client") + err := k8sClient.Get(ctx, typeNamespacedName, client) + if err != nil && errors.IsNotFound(err) { + resource := &jumpstarterdevv1alpha1.Client{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. 
+ resource := &jumpstarterdevv1alpha1.Client{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Identity") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + // the cascade delete of secrets does not work on test env + // https://book.kubebuilder.io/reference/envtest#testing-considerations + Expect(k8sClient.Delete(ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName + "-client", + Namespace: "default", + }, + })).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + signer, err := oidc.NewSignerFromSeed([]byte{}, "https://example.com", "dummy") + Expect(err).NotTo(HaveOccurred()) + + controllerReconciler := &ClientReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + Signer: signer, + } + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ }) + + It("should reconcile a missing token secret", func() { + By("recreating the secret") + signer, err := oidc.NewSignerFromSeed([]byte{}, "https://example.com", "dummy") + Expect(err).NotTo(HaveOccurred()) + + controllerReconciler := &ClientReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + Signer: signer, + } + + // point the client to a non-existing secret + client := &jumpstarterdevv1alpha1.Client{} + Expect(k8sClient.Get(ctx, typeNamespacedName, client)).To(Succeed()) + + client.Status.Credential = &corev1.LocalObjectReference{Name: "non-existing-secret"} + Expect(k8sClient.Status().Update(ctx, client)).To(Succeed()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + By("verifying the secret was created") + secret := &corev1.Secret{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Namespace: "default", + Name: resourceName + "-client", + }, secret)).To(Succeed()) + }) + + It("should reconcile an invalid token secret", func() { + By("recreating the secret") + signer, err := oidc.NewSignerFromSeed([]byte{}, "https://example.com", "dummy") + Expect(err).NotTo(HaveOccurred()) + + controllerReconciler := &ClientReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + Signer: signer, + } + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + // modify the secret to something invalid + secret := &corev1.Secret{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Namespace: "default", + Name: resourceName + "-client", + }, secret)).To(Succeed()) + secret.Data[TokenKey] = []byte("invalid") + Expect(k8sClient.Update(ctx, secret)).To(Succeed()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + By("verifying the secret was updated") + secret = 
&corev1.Secret{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Namespace: "default", + Name: resourceName + "-client", + }, secret)).To(Succeed()) + Expect(secret.Data[TokenKey]).NotTo(Equal([]byte("invalid"))) + }) + }) +}) diff --git a/controller/internal/controller/endpoints.go b/controller/internal/controller/endpoints.go new file mode 100644 index 000000000..b5668a8c2 --- /dev/null +++ b/controller/internal/controller/endpoints.go @@ -0,0 +1,13 @@ +package controller + +import ( + "os" +) + +func controllerEndpoint() string { + ep := os.Getenv("GRPC_ENDPOINT") + if ep == "" { + return "localhost:8082" + } + return ep +} diff --git a/controller/internal/controller/errors.go b/controller/internal/controller/errors.go new file mode 100644 index 000000000..d9d56fd90 --- /dev/null +++ b/controller/internal/controller/errors.go @@ -0,0 +1,16 @@ +package controller + +import ( + "github.com/go-logr/logr" + apierrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" +) + +func RequeueConflict(logger logr.Logger, result ctrl.Result, err error) (ctrl.Result, error) { + if apierrors.IsConflict(err) { + logger.V(1).Info("Ignoring conflict error but requeuing the reconciliation request", "error", err) + return ctrl.Result{Requeue: true}, nil + } else { + return result, err + } +} diff --git a/controller/internal/controller/exporter_controller.go b/controller/internal/controller/exporter_controller.go new file mode 100644 index 000000000..0862f77c9 --- /dev/null +++ b/controller/internal/controller/exporter_controller.go @@ -0,0 +1,220 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" +) + +// ExporterReconciler reconciles a Exporter object +type ExporterReconciler struct { + client.Client + Scheme *runtime.Scheme + Signer *oidc.Signer +} + +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporters/finalizers,verbs=update +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=exporteraccesspolicies,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the Exporter object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.2/pkg/reconcile +func (r *ExporterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + var exporter jumpstarterdevv1alpha1.Exporter + if err := r.Get(ctx, req.NamespacedName, &exporter); err != nil { + return ctrl.Result{}, client.IgnoreNotFound( + fmt.Errorf("Reconcile: unable to get exporter: %w", err), + ) + } + + original := client.MergeFrom(exporter.DeepCopy()) + + if err := r.reconcileStatusCredential(ctx, &exporter); err != nil { + return ctrl.Result{}, err + } + + if err := r.reconcileStatusLeaseRef(ctx, &exporter); err != nil { + return ctrl.Result{}, err + } + + result, err := r.reconcileStatusConditionsOnline(ctx, &exporter) + if err != nil { + return ctrl.Result{}, err + } + + if err := r.reconcileStatusEndpoint(ctx, &exporter); err != nil { + return ctrl.Result{}, err + } + + if err := r.Status().Patch(ctx, &exporter, original); err != nil { + return RequeueConflict(logger, ctrl.Result{}, err) + } + + return result, nil +} + +func (r *ExporterReconciler) reconcileStatusCredential( + ctx context.Context, + exporter *jumpstarterdevv1alpha1.Exporter, +) error { + secret, err := ensureSecret(ctx, client.ObjectKey{ + Name: exporter.Name + "-exporter", + Namespace: exporter.Namespace, + }, r.Client, r.Scheme, r.Signer, exporter.InternalSubject(), exporter) + if err != nil { + return fmt.Errorf("reconcileStatusCredential: failed to prepare credential for exporter: %w", err) + } + exporter.Status.Credential = &corev1.LocalObjectReference{ + Name: secret.Name, + } + return nil +} + +func (r *ExporterReconciler) reconcileStatusLeaseRef( + ctx context.Context, + exporter *jumpstarterdevv1alpha1.Exporter, +) error { + var leases jumpstarterdevv1alpha1.LeaseList + if err := r.List( + ctx, + &leases, + client.InNamespace(exporter.Namespace), + MatchingActiveLeases(), + ); err 
!= nil { + return fmt.Errorf("reconcileStatusLeaseRef: failed to list active leases: %w", err) + } + + exporter.Status.LeaseRef = nil + for _, lease := range leases.Items { + if !lease.Status.Ended && lease.Status.ExporterRef != nil { + if lease.Status.ExporterRef.Name == exporter.Name { + exporter.Status.LeaseRef = &corev1.LocalObjectReference{ + Name: lease.Name, + } + } + } + } + + return nil +} + +// nolint:unparam +func (r *ExporterReconciler) reconcileStatusEndpoint( + ctx context.Context, + exporter *jumpstarterdevv1alpha1.Exporter, +) error { + logger := log.FromContext(ctx) + + endpoint := controllerEndpoint() + if exporter.Status.Endpoint != endpoint { + logger.Info("reconcileStatusEndpoint: updating controller endpoint") + exporter.Status.Endpoint = endpoint + } + + return nil +} + +// nolint:unparam +func (r *ExporterReconciler) reconcileStatusConditionsOnline( + _ context.Context, + exporter *jumpstarterdevv1alpha1.Exporter, +) (ctrl.Result, error) { + var requeueAfter time.Duration = 0 + + if exporter.Status.LastSeen.IsZero() { + meta.SetStatusCondition(&exporter.Status.Conditions, metav1.Condition{ + Type: string(jumpstarterdevv1alpha1.ExporterConditionTypeOnline), + Status: metav1.ConditionFalse, + ObservedGeneration: exporter.Generation, + Reason: "Seen", + Message: "Never seen", + }) + // marking the exporter offline, no need to requeue + } else if time.Since(exporter.Status.LastSeen.Time) > time.Minute { + meta.SetStatusCondition(&exporter.Status.Conditions, metav1.Condition{ + Type: string(jumpstarterdevv1alpha1.ExporterConditionTypeOnline), + Status: metav1.ConditionFalse, + ObservedGeneration: exporter.Generation, + Reason: "Seen", + Message: "Last seen more than 1 minute ago", + }) + // marking the exporter offline, no need to requeue + } else { + meta.SetStatusCondition(&exporter.Status.Conditions, metav1.Condition{ + Type: string(jumpstarterdevv1alpha1.ExporterConditionTypeOnline), + Status: metav1.ConditionTrue, + ObservedGeneration: 
exporter.Generation, + Reason: "Seen", + Message: "Last seen less than 1 minute ago", + }) + // marking the exporter online, requeue after 30 seconds + requeueAfter = time.Second * 30 + } + + if exporter.Status.Devices == nil { + meta.SetStatusCondition(&exporter.Status.Conditions, metav1.Condition{ + Type: string(jumpstarterdevv1alpha1.ExporterConditionTypeRegistered), + Status: metav1.ConditionFalse, + ObservedGeneration: exporter.Generation, + Reason: "Unregister", + }) + } else { + meta.SetStatusCondition(&exporter.Status.Conditions, metav1.Condition{ + Type: string(jumpstarterdevv1alpha1.ExporterConditionTypeRegistered), + Status: metav1.ConditionTrue, + ObservedGeneration: exporter.Generation, + Reason: "Register", + }) + } + + return ctrl.Result{ + RequeueAfter: requeueAfter, + }, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ExporterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&jumpstarterdevv1alpha1.Exporter{}). + Owns(&jumpstarterdevv1alpha1.Lease{}). + Owns(&corev1.Secret{}). + Complete(r) +} diff --git a/controller/internal/controller/exporter_controller_test.go b/controller/internal/controller/exporter_controller_test.go new file mode 100644 index 000000000..9449993a9 --- /dev/null +++ b/controller/internal/controller/exporter_controller_test.go @@ -0,0 +1,129 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" +) + +var _ = Describe("Exporter Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + exporter := &jumpstarterdevv1alpha1.Exporter{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Exporter") + err := k8sClient.Get(ctx, typeNamespacedName, exporter) + if err != nil && errors.IsNotFound(err) { + resource := &jumpstarterdevv1alpha1.Exporter{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. 
+ resource := &jumpstarterdevv1alpha1.Exporter{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Exporter") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + // the cascade delete of secrets does not work on test env + // https://book.kubebuilder.io/reference/envtest#testing-considerations + Expect(k8sClient.Delete(ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName + "-exporter", + Namespace: "default", + }, + })).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + signer, err := oidc.NewSignerFromSeed([]byte{}, "https://example.com", "dummy") + Expect(err).NotTo(HaveOccurred()) + + controllerReconciler := &ExporterReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + Signer: signer, + } + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ }) + It("should reconcile a missing token secret", func() { + By("recreating the secret") + signer, err := oidc.NewSignerFromSeed([]byte{}, "https://example.com", "dummy") + Expect(err).NotTo(HaveOccurred()) + + controllerReconciler := &ExporterReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + Signer: signer, + } + + // point the exporter to a non-existing secret + exporter := &jumpstarterdevv1alpha1.Exporter{} + Expect(k8sClient.Get(ctx, typeNamespacedName, exporter)).To(Succeed()) + + exporter.Status.Credential = &corev1.LocalObjectReference{Name: "non-existing-secret"} + Expect(k8sClient.Status().Update(ctx, exporter)).To(Succeed()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + By("verifying the secret was created") + secret := &corev1.Secret{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Namespace: "default", + Name: resourceName + "-exporter", + }, secret)).To(Succeed()) + }) + }) +}) diff --git a/controller/internal/controller/lease.go b/controller/internal/controller/lease.go new file mode 100644 index 000000000..83a96bd71 --- /dev/null +++ b/controller/internal/controller/lease.go @@ -0,0 +1,26 @@ +package controller + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" +) + +func MatchingActiveLeases() client.ListOption { + // TODO: use field selector once KEP-4358 is stabilized + // Reference: https://github.com/kubernetes/kubernetes/pull/122717 + requirement, err := labels.NewRequirement( + string(jumpstarterdevv1alpha1.LeaseLabelEnded), + selection.DoesNotExist, + []string{}, + ) + + utilruntime.Must(err) + + return client.MatchingLabelsSelector{ + Selector: labels.Everything().Add(*requirement), + } +} diff 
--git a/controller/internal/controller/lease_controller.go b/controller/internal/controller/lease_controller.go new file mode 100644 index 000000000..eb5c7596f --- /dev/null +++ b/controller/internal/controller/lease_controller.go @@ -0,0 +1,517 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "slices" + "strings" + "time" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// LeaseReconciler reconciles a Lease object +type LeaseReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// ApprovedExporter represents an exporter that has been approved for leasing, +// along with its associated policy and any existing lease. 
+type ApprovedExporter struct { + // Exporter is the approved exporter + Exporter jumpstarterdevv1alpha1.Exporter + // ExistingLease is a pointer to any existing lease for this exporter, or nil if none exists + ExistingLease *jumpstarterdevv1alpha1.Lease + // Policy represents the access policy that approved this exporter + Policy jumpstarterdevv1alpha1.Policy +} + +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=leases,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=leases/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=jumpstarter.dev,resources=leases/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the Lease object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile +func (r *LeaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + ctx = ctrl.LoggerInto(ctx, logger) + + var lease jumpstarterdevv1alpha1.Lease + if err := r.Get(ctx, req.NamespacedName, &lease); err != nil { + return ctrl.Result{}, client.IgnoreNotFound( + fmt.Errorf("Reconcile: unable to get lease: %w", err), + ) + } + + var result ctrl.Result + if err := r.reconcileStatusExporterRef(ctx, &result, &lease); err != nil { + return result, err + } + + if err := r.reconcileStatusBeginEndTimes(ctx, &lease); err != nil { + return result, err + } + + if err := r.reconcileStatusEnded(ctx, &result, &lease); err != nil { + return result, err + } + + if err := r.Status().Update(ctx, &lease); err != nil { + return RequeueConflict(logger, result, err) + } + + if lease.Labels == nil { + lease.Labels = make(map[string]string) + } + if lease.Status.Ended { + lease.Labels[string(jumpstarterdevv1alpha1.LeaseLabelEnded)] = jumpstarterdevv1alpha1.LeaseLabelEndedValue + } + + if lease.Status.ExporterRef != nil { + var exporter jumpstarterdevv1alpha1.Exporter + if err := r.Get(ctx, types.NamespacedName{ + Namespace: lease.Namespace, + Name: lease.Status.ExporterRef.Name, + }, &exporter); err != nil { + return result, err + } + if err := controllerutil.SetControllerReference(&exporter, &lease, r.Scheme); err != nil { + return result, fmt.Errorf("Reconcile: failed to update lease controller reference: %w", err) + } + } + + if err := r.Update(ctx, &lease); err != nil { + return RequeueConflict(logger, result, fmt.Errorf("Reconcile: failed to update lease metadata: %w", err)) + } + + return result, nil +} + +// also manages EndTime and LeaseConditionTypeReady +// nolint:unparam +func (r *LeaseReconciler) reconcileStatusEnded( + ctx context.Context, + result *ctrl.Result, + lease 
*jumpstarterdevv1alpha1.Lease, +) error { + + now := time.Now() + if !lease.Status.Ended { + // if lease has status condition unsatisfiable or invalid, we mark it as ended to avoid reprocessing + if meta.IsStatusConditionTrue(lease.Status.Conditions, string(jumpstarterdevv1alpha1.LeaseConditionTypeUnsatisfiable)) || + meta.IsStatusConditionTrue(lease.Status.Conditions, string(jumpstarterdevv1alpha1.LeaseConditionTypeInvalid)) { + lease.Status.Ended = true + lease.Status.EndTime = &metav1.Time{Time: now} + return nil + } else if lease.Spec.Release { + lease.Release(ctx) + return nil + } else if lease.Status.BeginTime != nil { + var expiration time.Time + if lease.Spec.EndTime != nil { + // expires at Spec.EndTime when specified + expiration = lease.Spec.EndTime.Time + } else if lease.Spec.BeginTime != nil && lease.Spec.Duration != nil { + // expires at Spec.BeginTime + Spec.Duration - scheduled lease + expiration = lease.Spec.BeginTime.Add(lease.Spec.Duration.Duration) + } else if lease.Spec.Duration != nil { + // expires at actual BeginTime + Spec.Duration - immediate lease + expiration = lease.Status.BeginTime.Add(lease.Spec.Duration.Duration) + } + + if expiration.Before(now) { + lease.Expire(ctx) + return nil + } + result.RequeueAfter = expiration.Sub(now) + return nil + } + + } + return nil +} + +// nolint:unparam +func (r *LeaseReconciler) reconcileStatusBeginEndTimes( + ctx context.Context, + lease *jumpstarterdevv1alpha1.Lease, +) error { + if lease.Status.BeginTime == nil && lease.Status.ExporterRef != nil { + logger := log.FromContext(ctx) + logger.Info("Updating begin time for lease", "lease", lease.Name, "exporter", lease.GetExporterName(), "client", lease.GetClientName()) + now := time.Now() + lease.Status.BeginTime = &metav1.Time{Time: now} + lease.SetStatusReady(true, "Ready", "An exporter has been acquired for the client") + } + + return nil +} + +// Also manages LeaseConditionTypeUnsatisfiable and LeaseConditionTypePending +func (r *LeaseReconciler) 
reconcileStatusExporterRef( + ctx context.Context, + result *ctrl.Result, + lease *jumpstarterdevv1alpha1.Lease, +) error { + logger := log.FromContext(ctx) + + // Do not attempt to reconcile if the lease is already ended/invalid/etc + if lease.Status.Ended { + return nil + } + + if lease.Status.ExporterRef == nil { + // For scheduled leases: only assign exporter if requested BeginTime has arrived + if lease.Spec.BeginTime != nil { + now := time.Now() + if lease.Spec.BeginTime.After(now) { + // Requested BeginTime is in the future, wait until then + waitDuration := lease.Spec.BeginTime.Sub(now) + logger.Info("Lease is scheduled for the future, waiting", + "lease", lease.Name, + "requestedBeginTime", lease.Spec.BeginTime, + "waitDuration", waitDuration) + result.RequeueAfter = waitDuration + return nil + } + } + logger.Info("Looking for a matching exporter for lease", "lease", lease.Name, "client", lease.GetClientName(), "selector", lease.Spec.Selector) + + selector, err := lease.GetExporterSelector() + if err != nil { + return fmt.Errorf("reconcileStatusExporterRef: failed to get exporter selector: %w", err) + } else if selector.Empty() { + lease.SetStatusInvalid("InvalidSelector", "The selector for the lease is empty, a selector is required") + return nil + } + + // List all Exporter matching selector + matchingExporters, err := r.ListMatchingExporters(ctx, lease, selector) + if err != nil { + return fmt.Errorf("reconcileStatusExporterRef: failed to list matching exporters: %w", err) + } + + approvedExporters, err := r.attachMatchingPolicies(ctx, lease, matchingExporters.Items) + if err != nil { + return fmt.Errorf("reconcileStatusExporterRef: failed to handle policy approval: %w", err) + } + + if len(approvedExporters) == 0 { + lease.SetStatusUnsatisfiable( + "NoAccess", + "While there are %d exporters matching the selector, none of them are approved by any policy for your client", + len(matchingExporters.Items), + ) + return nil + } + + onlineApprovedExporters 
:= filterOutOfflineExporters(approvedExporters) + if len(onlineApprovedExporters) == 0 { + lease.SetStatusPending( + "Offline", + "While there are %d available exporters (i.e. %s), none of them are online", + len(approvedExporters), + approvedExporters[0].Exporter.Name, + ) + result.RequeueAfter = time.Second + return nil + } + + // Filter out exporters that are already leased + activeLeases, err := r.ListActiveLeases(ctx, lease.Namespace) + if err != nil { + return fmt.Errorf("reconcileStatusExporterRef: failed to list active leases: %w", err) + } + + onlineApprovedExporters = attachExistingLeases(onlineApprovedExporters, activeLeases.Items) + orderedExporters := orderApprovedExporters(onlineApprovedExporters) + + if len(orderedExporters) > 0 && orderedExporters[0].Policy.SpotAccess { + lease.SetStatusUnsatisfiable("SpotAccess", + "The only possible exporters are under spot access (i.e. %s), but spot access is still not implemented", + orderedExporters[0].Exporter.Name) + return nil + } + + availableExporters := filterOutLeasedExporters(onlineApprovedExporters) + if len(availableExporters) == 0 { + lease.SetStatusPending("NotAvailable", + "There are %d approved exporters, (i.e. %s) but all of them are already leased", + len(onlineApprovedExporters), + onlineApprovedExporters[0].Exporter.Name, + ) + result.RequeueAfter = time.Second + return nil + } + + // TODO: here there's room for improvement, i.e. we could have multiple + // clients trying to lease the same exporters, we should look at priorities + // and spot access to decide which client gets the exporter, this probably means + // that we will need to construct a lease scheduler with the view of all leases + // and exporters in the system, and (maybe) a priority queue for the leases. 
+ + // For now, we just select the best available exporter without considering other + // ongoing lease requests + + selected := availableExporters[0] + + if selected.ExistingLease != nil { + // TODO: Implement eviction of spot access leases + lease.SetStatusPending("NotAvailable", + "Exporter %s is already leased by another client under spot access, but spot access eviction still not implemented", + selected.Exporter.Name) + result.RequeueAfter = time.Second + return nil + } + + lease.Status.Priority = selected.Policy.Priority + lease.Status.SpotAccess = selected.Policy.SpotAccess + lease.Status.ExporterRef = &corev1.LocalObjectReference{ + Name: selected.Exporter.Name, + } + return nil + } + + return nil +} + +// attachMatchingPolicies attaches the matching policies to the list of online exporters +// if the exporter matches the policy and the client matches the policy's client selector +// the exporter is approved for leasing +func (r *LeaseReconciler) attachMatchingPolicies(ctx context.Context, lease *jumpstarterdevv1alpha1.Lease, onlineExporters []jumpstarterdevv1alpha1.Exporter) ([]ApprovedExporter, error) { + var approvedExporters []ApprovedExporter + + var policies jumpstarterdevv1alpha1.ExporterAccessPolicyList + if err := r.List(ctx, &policies, + client.InNamespace(lease.Namespace), + ); err != nil { + return nil, fmt.Errorf("reconcileStatusExporterRef: failed to list exporter access policies: %w", err) + } + + // If there are no policies, we just approve all online exporters + if len(policies.Items) == 0 { + for _, exporter := range onlineExporters { + approvedExporters = append(approvedExporters, ApprovedExporter{ + Exporter: exporter, + Policy: jumpstarterdevv1alpha1.Policy{ + Priority: 0, + SpotAccess: false, + }, + }) + } + return approvedExporters, nil + } + // If policies exist: get the client to obtain the metadata necessary for policy matching + var jclient jumpstarterdevv1alpha1.Client + if err := r.Get(ctx, types.NamespacedName{ + Namespace: 
lease.Namespace, + Name: lease.Spec.ClientRef.Name, + }, &jclient); err != nil { + return nil, fmt.Errorf("reconcileStatusExporterRef: failed to get client: %w", err) + } + + for _, exporter := range onlineExporters { + for _, policy := range policies.Items { + exporterSelector, err := metav1.LabelSelectorAsSelector(&policy.Spec.ExporterSelector) + if err != nil { + return nil, fmt.Errorf("reconcileStatusExporterRef: failed to convert exporter selector: %w", err) + } + if exporterSelector.Matches(labels.Set(exporter.Labels)) { + for _, p := range policy.Spec.Policies { + for _, from := range p.From { + clientSelector, err := metav1.LabelSelectorAsSelector(&from.ClientSelector) + if err != nil { + return nil, fmt.Errorf("reconcileStatusExporterRef: failed to convert client selector: %w", err) + } + if clientSelector.Matches(labels.Set(jclient.Labels)) { + if p.MaximumDuration != nil { + // Calculate requested duration (may be from explicit Duration or computed from times) + requestedDuration := time.Duration(0) + if lease.Spec.Duration != nil { + requestedDuration = lease.Spec.Duration.Duration + } else if lease.Spec.BeginTime != nil && lease.Spec.EndTime != nil { + requestedDuration = lease.Spec.EndTime.Sub(lease.Spec.BeginTime.Time) + } + if requestedDuration > p.MaximumDuration.Duration { + // TODO: we probably should keep this on the list of approved exporters + // but mark as excessive duration so we can report it on the status + // of lease if no other options exist + continue + } + } + approvedExporters = append(approvedExporters, ApprovedExporter{ + Exporter: exporter, + Policy: p, + }) + } + } + } + } + } + } + return approvedExporters, nil +} + +// ListMatchingExporters returns a list of exporters that match the selector of the lease +func (r *LeaseReconciler) ListMatchingExporters(ctx context.Context, lease *jumpstarterdevv1alpha1.Lease, + selector labels.Selector) (*jumpstarterdevv1alpha1.ExporterList, error) { + + var matchingExporters 
jumpstarterdevv1alpha1.ExporterList + if err := r.List( + ctx, + &matchingExporters, + client.InNamespace(lease.Namespace), + client.MatchingLabelsSelector{Selector: selector}, + ); err != nil { + return nil, fmt.Errorf("ListMatchingExporters: failed to list exporters matching selector: %w", err) + } + return &matchingExporters, nil +} + +// ListActiveLeases returns a list of active leases in the namespace +func (r *LeaseReconciler) ListActiveLeases(ctx context.Context, namespace string) (*jumpstarterdevv1alpha1.LeaseList, error) { + var activeLeases jumpstarterdevv1alpha1.LeaseList + if err := r.List( + ctx, + &activeLeases, + client.InNamespace(namespace), + MatchingActiveLeases(), + ); err != nil { + return nil, err + } + return &activeLeases, nil +} + +// attachExistingLeases attaches the existing leases to the approved exporter list +// if the activeLeases slice contains a lease that references the exporter in the +// approved exporter list +func attachExistingLeases(exporters []ApprovedExporter, activeLeases []jumpstarterdevv1alpha1.Lease) []ApprovedExporter { + for i, exporter := range exporters { + for _, existingLease := range activeLeases { + if existingLease.Status.ExporterRef != nil && + existingLease.Status.ExporterRef.Name == exporter.Exporter.Name { + exporters[i].ExistingLease = &existingLease + } + } + } + return exporters +} + +// orderAvailableExporters orders the exporters in the following order +// 1. Not being leased +// 2. Not accessible under spot access +// 3. Highest priority +// 4. 
Alphabetically by exporter name + +func orderApprovedExporters(exporters []ApprovedExporter) []ApprovedExporter { + // Order by lease status, priority, spot access, and name + + cmpFunc := func(a, b ApprovedExporter) int { + // If one of the exporters has an existing lease, we want to prioritize the one that doesn't + if a.ExistingLease != nil && b.ExistingLease == nil { + return 1 + } else if a.ExistingLease == nil && b.ExistingLease != nil { + return -1 + } + + // We want spot access policies to be later on the returned array + if a.Policy.SpotAccess != b.Policy.SpotAccess { + if a.Policy.SpotAccess { + return 1 + } + return -1 + } + + // We want the highest priority to be first + if a.Policy.Priority != b.Policy.Priority { + return b.Policy.Priority - a.Policy.Priority + } + + // If the priority is the same, we want to sort by exporter name + return strings.Compare(a.Exporter.Name, b.Exporter.Name) + } + + slices.SortFunc(exporters, cmpFunc) + + return exporters +} + +// filterOutLeasedExporters filters out the exporters that are already leased +func filterOutLeasedExporters(exporters []ApprovedExporter) []ApprovedExporter { + // Exclude exporter that are already leased and non-takeable + return slices.DeleteFunc(exporters, func(ae ApprovedExporter) bool { + existingLease := ae.ExistingLease + if existingLease == nil { + return false + } + + weHaveNonSpotAccess := !ae.Policy.SpotAccess + + // There is an existing lease, but, if it's spot access we can take it + if weHaveNonSpotAccess && ae.ExistingLease.Status.SpotAccess { + return false + } + + // ok, there is an existing lease, and it's not spot access, we can't take it + return true + }) + +} + +// filterOutOfflineExporters filters out the exporters that are not online +func filterOutOfflineExporters(approvedExporters []ApprovedExporter) []ApprovedExporter { + onlineExporters := slices.DeleteFunc( + approvedExporters, + func(approvedExporter ApprovedExporter) bool { + return !meta.IsStatusConditionTrue( + 
approvedExporter.Exporter.Status.Conditions, + string(jumpstarterdevv1alpha1.ExporterConditionTypeRegistered), + ) || !meta.IsStatusConditionTrue( + approvedExporter.Exporter.Status.Conditions, + string(jumpstarterdevv1alpha1.ExporterConditionTypeOnline), + ) + }, + ) + return onlineExporters +} + +// SetupWithManager sets up the controller with the Manager. +func (r *LeaseReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&jumpstarterdevv1alpha1.Lease{}). + Complete(r) +} diff --git a/controller/internal/controller/lease_controller_test.go b/controller/internal/controller/lease_controller_test.go new file mode 100644 index 000000000..e3dc97f6f --- /dev/null +++ b/controller/internal/controller/lease_controller_test.go @@ -0,0 +1,1814 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "time" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" + cpb "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/client/v1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + lease1Name = "lease1" + lease2Name = "lease2" + lease3Name = "lease3" +) + +var leaseDutA2Sec = &jumpstarterdevv1alpha1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: "lease1", + Namespace: "default", + }, + Spec: jumpstarterdevv1alpha1.LeaseSpec{ + ClientRef: corev1.LocalObjectReference{ + Name: testClient.Name, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "dut": "a", + }, + }, + Duration: &metav1.Duration{ + Duration: 2 * time.Second, + }, + }, +} +var _ = Describe("Lease Controller", func() { + BeforeEach(func() { + createExporters(context.Background(), testExporter1DutA, testExporter2DutA, testExporter3DutB) + setExporterOnlineConditions(context.Background(), testExporter1DutA.Name, metav1.ConditionTrue) + setExporterOnlineConditions(context.Background(), testExporter2DutA.Name, metav1.ConditionTrue) + setExporterOnlineConditions(context.Background(), testExporter3DutB.Name, metav1.ConditionTrue) + }) + AfterEach(func() { + ctx := context.Background() + deleteExporters(ctx, testExporter1DutA, testExporter2DutA, testExporter3DutB) + deleteLeases(ctx, lease1Name, lease2Name, lease3Name) + }) + + When("trying to lease with an empty selector", func() { + It("should fail right away", func() { + lease := leaseDutA2Sec.DeepCopy() + lease.Spec.Selector.MatchLabels = nil + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil()) + + Expect(meta.IsStatusConditionTrue( + updatedLease.Status.Conditions, + 
string(jumpstarterdevv1alpha1.LeaseConditionTypeInvalid), + )).To(BeTrue()) + }) + }) + + When("trying to lease an available exporter", func() { + It("should acquire lease right away", func() { + lease := leaseDutA2Sec.DeepCopy() + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + Expect(updatedLease.Status.ExporterRef.Name).To(BeElementOf([]string{testExporter1DutA.Name, testExporter2DutA.Name})) + Expect(updatedLease.Status.BeginTime).NotTo(BeNil()) + + updatedExporter := getExporter(ctx, updatedLease.Status.ExporterRef.Name) + Expect(updatedExporter.Status.LeaseRef).NotTo(BeNil()) + Expect(updatedExporter.Status.LeaseRef.Name).To(Equal(lease.Name)) + }) + + It("should be released after the lease time", func() { + lease := leaseDutA2Sec.DeepCopy() + lease.Spec.Duration = &metav1.Duration{Duration: 100 * time.Millisecond} + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + + exporterName := updatedLease.Status.ExporterRef.Name + + // Poll until lease expires + Eventually(func() bool { + _ = reconcileLease(ctx, lease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.Ended + }).WithTimeout(2000 * time.Millisecond).WithPolling(50 * time.Millisecond).Should(BeTrue()) + + // exporter is retained for record purposes + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + + // the exporter should have no lease mark on status + updatedExporter := getExporter(ctx, exporterName) + Expect(updatedExporter.Status.LeaseRef).To(BeNil()) + + }) + }) + + When("trying to lease a non existing exporter", func() { + It("should fail right away", func() { + lease := leaseDutA2Sec.DeepCopy() + 
lease.Spec.Selector.MatchLabels["dut"] = "does-not-exist" + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil()) + + Expect(meta.IsStatusConditionTrue( + updatedLease.Status.Conditions, + string(jumpstarterdevv1alpha1.LeaseConditionTypeUnsatisfiable), + )).To(BeTrue()) + }) + }) + + When("trying to lease an offline exporter", func() { + It("should set status to pending with offline reason", func() { + lease := leaseDutA2Sec.DeepCopy() + + ctx := context.Background() + + setExporterOnlineConditions(ctx, testExporter1DutA.Name, metav1.ConditionFalse) + setExporterOnlineConditions(ctx, testExporter2DutA.Name, metav1.ConditionFalse) + + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil()) + + Expect(meta.IsStatusConditionTrue( + updatedLease.Status.Conditions, + string(jumpstarterdevv1alpha1.LeaseConditionTypePending), + )).To(BeTrue()) + + // Check that the condition has the correct reason + condition := meta.FindStatusCondition(updatedLease.Status.Conditions, string(jumpstarterdevv1alpha1.LeaseConditionTypePending)) + Expect(condition).NotTo(BeNil()) + Expect(condition.Reason).To(Equal("Offline")) + }) + }) + + When("trying to lease approved exporters that are offline", func() { + It("should set status to pending with offline reason", func() { + lease := leaseDutA2Sec.DeepCopy() + + ctx := context.Background() + + // Create a policy that approves the exporters + policy := &jumpstarterdevv1alpha1.ExporterAccessPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-policy", + Namespace: "default", + }, + Spec: jumpstarterdevv1alpha1.ExporterAccessPolicySpec{ + ExporterSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "dut": "a", + }, + }, + Policies: 
[]jumpstarterdevv1alpha1.Policy{ + { + Priority: 0, + From: []jumpstarterdevv1alpha1.From{ + { + ClientSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "client", + }, + }, + }, + }, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, policy)).To(Succeed()) + + // Set exporters offline while they are approved by policy + setExporterOnlineConditions(ctx, testExporter1DutA.Name, metav1.ConditionFalse) + setExporterOnlineConditions(ctx, testExporter2DutA.Name, metav1.ConditionFalse) + + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil()) + + Expect(meta.IsStatusConditionTrue( + updatedLease.Status.Conditions, + string(jumpstarterdevv1alpha1.LeaseConditionTypePending), + )).To(BeTrue()) + + // Check that the condition has the correct reason + condition := meta.FindStatusCondition(updatedLease.Status.Conditions, string(jumpstarterdevv1alpha1.LeaseConditionTypePending)) + Expect(condition).NotTo(BeNil()) + Expect(condition.Reason).To(Equal("Offline")) + Expect(condition.Message).To(ContainSubstring("none of them are online")) + + // Clean up + Expect(k8sClient.Delete(ctx, policy)).To(Succeed()) + }) + }) + + When("trying to lease exporters that match selector but are not approved by any policy", func() { + It("should set status to unsatisfiable with NoAccess reason", func() { + lease := leaseDutA2Sec.DeepCopy() + + ctx := context.Background() + + // Create a policy that does NOT approve the exporters (different client selector) + policy := &jumpstarterdevv1alpha1.ExporterAccessPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-policy", + Namespace: "default", + }, + Spec: jumpstarterdevv1alpha1.ExporterAccessPolicySpec{ + ExporterSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "dut": "a", + }, + }, + Policies: []jumpstarterdevv1alpha1.Policy{ + { + Priority: 0, + From: 
[]jumpstarterdevv1alpha1.From{ + { + ClientSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "different-client", // Different from testClient + }, + }, + }, + }, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, policy)).To(Succeed()) + + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil()) + + Expect(meta.IsStatusConditionTrue( + updatedLease.Status.Conditions, + string(jumpstarterdevv1alpha1.LeaseConditionTypeUnsatisfiable), + )).To(BeTrue()) + + // Check that the condition has the correct reason + condition := meta.FindStatusCondition(updatedLease.Status.Conditions, string(jumpstarterdevv1alpha1.LeaseConditionTypeUnsatisfiable)) + Expect(condition).NotTo(BeNil()) + Expect(condition.Reason).To(Equal("NoAccess")) + Expect(condition.Message).To(ContainSubstring("none of them are approved by any policy")) + + // Clean up + Expect(k8sClient.Delete(ctx, policy)).To(Succeed()) + }) + }) + + When("trying to lease exporters, and some matching exporters are online and while others are offline", func() { + It("should acquire lease for the online exporters", func() { + lease := leaseDutA2Sec.DeepCopy() + + ctx := context.Background() + + setExporterOnlineConditions(ctx, testExporter1DutA.Name, metav1.ConditionFalse) + + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + Expect(updatedLease.Status.ExporterRef.Name).To(BeElementOf([]string{testExporter2DutA.Name})) + Expect(updatedLease.Status.BeginTime).NotTo(BeNil()) + + updatedExporter := getExporter(ctx, updatedLease.Status.ExporterRef.Name) + Expect(updatedExporter.Status.LeaseRef).NotTo(BeNil()) + Expect(updatedExporter.Status.LeaseRef.Name).To(Equal(lease.Name)) + }) + }) + + When("trying to lease a busy exporter", func() { + 
It("should not be acquired", func() { + lease := leaseDutA2Sec.DeepCopy() + lease.Spec.Selector.MatchLabels["dut"] = "b" + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + Expect(updatedLease.Status.ExporterRef.Name).To(Equal(testExporter3DutB.Name)) + + updatedExporter := getExporter(ctx, updatedLease.Status.ExporterRef.Name) + Expect(updatedExporter.Status.LeaseRef).NotTo(BeNil()) + Expect(updatedExporter.Status.LeaseRef.Name).To(Equal(lease.Name)) + + // create another lease that attempts to acquire the only dut b exporter + // which is already leased + lease2 := leaseDutA2Sec.DeepCopy() + lease2.Name = lease2Name + lease2.Spec.Selector.MatchLabels["dut"] = "b" + Expect(k8sClient.Create(ctx, lease2)).To(Succeed()) + _ = reconcileLease(ctx, lease2) + + updatedLease = getLease(ctx, lease2Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil()) + + Expect(meta.IsStatusConditionTrue( + updatedLease.Status.Conditions, + string(jumpstarterdevv1alpha1.LeaseConditionTypePending), + )).To(BeTrue()) + + // Check that the condition has the correct reason and message format + condition := meta.FindStatusCondition(updatedLease.Status.Conditions, string(jumpstarterdevv1alpha1.LeaseConditionTypePending)) + Expect(condition).NotTo(BeNil()) + Expect(condition.Reason).To(Equal("NotAvailable")) + Expect(condition.Message).To(ContainSubstring("but all of them are already leased")) + }) + + It("should be acquired when a valid exporter lease times out", func() { + lease := leaseDutA2Sec.DeepCopy() + lease.Spec.Selector.MatchLabels["dut"] = "b" + lease.Spec.Duration = &metav1.Duration{Duration: 500 * time.Millisecond} + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + 
Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + Expect(updatedLease.Status.ExporterRef.Name).To(Equal(testExporter3DutB.Name)) + + updatedExporter := getExporter(ctx, updatedLease.Status.ExporterRef.Name) + Expect(updatedExporter.Status.LeaseRef).NotTo(BeNil()) + Expect(updatedExporter.Status.LeaseRef.Name).To(Equal(lease.Name)) + + // create another lease that attempts to acquire the only dut b exporter + // which is already leased + lease2 := leaseDutA2Sec.DeepCopy() + lease2.Name = lease2Name + lease2.Spec.Selector.MatchLabels["dut"] = "b" + Expect(k8sClient.Create(ctx, lease2)).To(Succeed()) + _ = reconcileLease(ctx, lease2) + + updatedLease = getLease(ctx, lease2Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil()) + // TODO: add and check status conditions of the lease to indicate that the lease is waiting + + // Poll until first lease expires and second lease acquires exporter + Eventually(func() bool { + _ = reconcileLease(ctx, lease) + _ = reconcileLease(ctx, lease2) + updatedLease = getLease(ctx, lease2Name) + return updatedLease.Status.ExporterRef != nil + }).WithTimeout(2500 * time.Millisecond).WithPolling(50 * time.Millisecond).Should(BeTrue()) + + }) + }) + + When("releasing a lease early", func() { + It("should release the lease and exporter right away", func() { + lease := leaseDutA2Sec.DeepCopy() + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + + exporterName := updatedLease.Status.ExporterRef.Name + + // release the lease early + // TODO: through the API we cannot set the status condition, we get this through the RPC, + // we should consider adding a flag on the spec to do this, or look at the duration too + updatedLease.Spec.Release = true + + Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed()) + + _ = reconcileLease(ctx, updatedLease) + + updatedLease = 
getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + Expect(updatedLease.Status.Ended).To(BeTrue()) + + updatedExporter := getExporter(ctx, exporterName) + Expect(updatedExporter.Status.LeaseRef).To(BeNil()) + }) + }) +}) + +var testExporter1DutA = &jumpstarterdevv1alpha1.Exporter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "exporter1-dut-a", + Namespace: "default", + Labels: map[string]string{ + "dut": "a", + }, + }, +} + +var testExporter2DutA = &jumpstarterdevv1alpha1.Exporter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "exporter2-dut-a", + Namespace: "default", + Labels: map[string]string{ + "dut": "a", + }, + }, +} + +var testExporter3DutB = &jumpstarterdevv1alpha1.Exporter{ + ObjectMeta: metav1.ObjectMeta{ + Name: "exporter3-dut-b", + Namespace: "default", + Labels: map[string]string{ + "dut": "b", + }, + }, +} + +func setExporterOnlineConditions(ctx context.Context, name string, status metav1.ConditionStatus) { + exporter := getExporter(ctx, name) + meta.SetStatusCondition(&exporter.Status.Conditions, metav1.Condition{ + Type: string(jumpstarterdevv1alpha1.ExporterConditionTypeRegistered), + Status: status, + Reason: "dummy", + }) + meta.SetStatusCondition(&exporter.Status.Conditions, metav1.Condition{ + Type: string(jumpstarterdevv1alpha1.ExporterConditionTypeOnline), + Status: status, + Reason: "dummy", + }) + if status == metav1.ConditionTrue { + exporter.Status.Devices = []jumpstarterdevv1alpha1.Device{{}} + exporter.Status.LastSeen = metav1.Now() + } else { + exporter.Status.Devices = nil + exporter.Status.LastSeen = metav1.NewTime(metav1.Now().Add(-time.Minute * 2)) + } + Expect(k8sClient.Status().Update(ctx, exporter)).To(Succeed()) +} + +func reconcileLease(ctx context.Context, lease *jumpstarterdevv1alpha1.Lease) reconcile.Result { + + // reconcile the exporters + typeNamespacedName := types.NamespacedName{ + Name: lease.Name, + Namespace: "default", + } + + leaseReconciler := &LeaseReconciler{ + Client: k8sClient, + 
Scheme: k8sClient.Scheme(), + } + + signer, err := oidc.NewSignerFromSeed([]byte{}, "https://example.com", "dummy") + Expect(err).NotTo(HaveOccurred()) + + exporterReconciler := &ExporterReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + Signer: signer, + } + + res, err := leaseReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + for _, owner := range getLease(ctx, lease.Name).OwnerReferences { + _, err := exporterReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: lease.Namespace, Name: owner.Name}, + }) + Expect(err).NotTo(HaveOccurred()) + } + + return res +} + +func getLease(ctx context.Context, name string) *jumpstarterdevv1alpha1.Lease { + lease := &jumpstarterdevv1alpha1.Lease{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: "default", + }, lease) + Expect(err).NotTo(HaveOccurred()) + return lease +} + +func getExporter(ctx context.Context, name string) *jumpstarterdevv1alpha1.Exporter { + exporter := &jumpstarterdevv1alpha1.Exporter{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: "default", + }, exporter) + Expect(err).NotTo(HaveOccurred()) + return exporter +} + +func deleteLeases(ctx context.Context, leases ...string) { + for _, lease := range leases { + leaseObj := &jumpstarterdevv1alpha1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: lease, + Namespace: "default", + }, + } + _ = k8sClient.Delete(ctx, leaseObj) + } +} + +var _ = Describe("orderApprovedExporters", func() { + When("approved exporters are under a lease", func() { + It("should put them last", func() { + approvedExporters := []ApprovedExporter{ + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 0, SpotAccess: false}, + Exporter: *testExporter1DutA, + ExistingLease: &jumpstarterdevv1alpha1.Lease{}, + }, + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 0, SpotAccess: false}, + Exporter: 
*testExporter2DutA, + }, + } + ordered := orderApprovedExporters(approvedExporters) + Expect(ordered[0].Exporter.Name).To(Equal(testExporter2DutA.Name)) + Expect(ordered[0].ExistingLease).To(BeNil()) + Expect(ordered[1].Exporter.Name).To(Equal(testExporter1DutA.Name)) + Expect(ordered[1].ExistingLease).NotTo(BeNil()) + }) + }) + + When("some approved exporters are accessible in spot mode", func() { + It("should put them last", func() { + approvedExporters := []ApprovedExporter{ + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 0, SpotAccess: true}, + Exporter: *testExporter1DutA, + ExistingLease: &jumpstarterdevv1alpha1.Lease{}, + }, + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 0, SpotAccess: false}, + Exporter: *testExporter2DutA, + ExistingLease: &jumpstarterdevv1alpha1.Lease{}, + }, + } + ordered := orderApprovedExporters(approvedExporters) + Expect(ordered[0].Exporter.Name).To(Equal(testExporter2DutA.Name)) + Expect(ordered[0].Policy.SpotAccess).To(BeFalse()) + Expect(ordered[1].Exporter.Name).To(Equal(testExporter1DutA.Name)) + Expect(ordered[1].Policy.SpotAccess).To(BeTrue()) + }) + }) + + When("some approved exporters have different policy priorities", func() { + It("should order them by priority", func() { + approvedExporters := []ApprovedExporter{ + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 5, SpotAccess: false}, + Exporter: *testExporter1DutA, + }, + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 10, SpotAccess: false}, + Exporter: *testExporter2DutA, + }, + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 100, SpotAccess: false}, + Exporter: *testExporter2DutA, + }, + } + ordered := orderApprovedExporters(approvedExporters) + Expect(ordered[0].Policy.Priority).To(Equal(int(100))) + Expect(ordered[1].Policy.Priority).To(Equal(int(10))) + Expect(ordered[2].Policy.Priority).To(Equal(int(5))) + + }) + }) + + When("some approved exporters have same policy priorities and no other traits", func() { + It("should order them by 
name", func() { + approvedExporters := []ApprovedExporter{ + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 5, SpotAccess: false}, + Exporter: *testExporter2DutA, + }, + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 5, SpotAccess: false}, + Exporter: *testExporter1DutA, + }, + } + ordered := orderApprovedExporters(approvedExporters) + + Expect(ordered[0].Exporter.Name).To(Equal(testExporter1DutA.Name)) + Expect(ordered[1].Exporter.Name).To(Equal(testExporter2DutA.Name)) + }) + }) + + When("mixed priorities, spot access, lease status are in the list", func() { + It("should order them properly", func() { + approvedExporters := []ApprovedExporter{ + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 5, SpotAccess: false}, + Exporter: *testExporter2DutA, + }, + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 100, SpotAccess: true}, + Exporter: *testExporter2DutA, + ExistingLease: &jumpstarterdevv1alpha1.Lease{}, + }, + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 10, SpotAccess: false}, + Exporter: *testExporter1DutA, + }, + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 5, SpotAccess: false}, + Exporter: *testExporter1DutA, + }, + { + Policy: jumpstarterdevv1alpha1.Policy{Priority: 10, SpotAccess: true}, + Exporter: *testExporter2DutA, + }, + } + + ordered := orderApprovedExporters(approvedExporters) + Expect(ordered[0].Policy.Priority).To(Equal(int(10))) + Expect(ordered[0].Policy.SpotAccess).To(BeFalse()) + Expect(ordered[0].Exporter.Name).To(Equal(testExporter1DutA.Name)) + + Expect(ordered[1].Policy.Priority).To(Equal(int(5))) + Expect(ordered[1].Policy.SpotAccess).To(BeFalse()) + Expect(ordered[1].Exporter.Name).To(Equal(testExporter1DutA.Name)) + + Expect(ordered[2].Policy.Priority).To(Equal(int(5))) + Expect(ordered[2].Policy.SpotAccess).To(BeFalse()) + Expect(ordered[2].Exporter.Name).To(Equal(testExporter2DutA.Name)) + + Expect(ordered[3].Policy.Priority).To(Equal(int(10))) + Expect(ordered[3].Policy.SpotAccess).To(BeTrue()) + 
Expect(ordered[3].Exporter.Name).To(Equal(testExporter2DutA.Name)) + + Expect(ordered[4].Policy.Priority).To(Equal(int(100))) + Expect(ordered[4].Policy.SpotAccess).To(BeTrue()) + Expect(ordered[4].Exporter.Name).To(Equal(testExporter2DutA.Name)) + + }) + }) +}) + +var _ = Describe("Scheduled Leases", func() { + BeforeEach(func() { + createExporters(context.Background(), testExporter1DutA, testExporter2DutA, testExporter3DutB) + setExporterOnlineConditions(context.Background(), testExporter1DutA.Name, metav1.ConditionTrue) + setExporterOnlineConditions(context.Background(), testExporter2DutA.Name, metav1.ConditionTrue) + setExporterOnlineConditions(context.Background(), testExporter3DutB.Name, metav1.ConditionTrue) + }) + AfterEach(func() { + ctx := context.Background() + deleteExporters(ctx, testExporter1DutA, testExporter2DutA, testExporter3DutB) + deleteLeases(ctx, lease1Name, lease2Name, lease3Name) + }) + + When("creating lease with Duration only (immediate lease)", func() { + It("should acquire exporter immediately and set effective begin time", func() { + lease := leaseDutA2Sec.DeepCopy() + lease.Spec.Duration = &metav1.Duration{Duration: 2 * time.Second} + lease.Spec.BeginTime = nil + lease.Spec.EndTime = nil + + ctx := context.Background() + beforeCreate := time.Now().Truncate(time.Second) + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + afterReconcile := time.Now().Truncate(time.Second) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Spec.BeginTime).To(BeNil(), "Spec.BeginTime should remain nil for immediate leases") + Expect(updatedLease.Spec.EndTime).To(BeNil(), "Spec.EndTime should remain nil") + Expect(updatedLease.Status.BeginTime).NotTo(BeNil(), "Status.BeginTime should be set") + Expect(updatedLease.Status.BeginTime.Time).To(BeTemporally(">=", beforeCreate)) + Expect(updatedLease.Status.BeginTime.Time).To(BeTemporally("<=", afterReconcile)) + 
Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Should have acquired exporter immediately") + }) + }) + + When("creating lease with BeginTime + Duration (scheduled lease)", func() { + It("should wait until BeginTime before acquiring exporter", func() { + lease := leaseDutA2Sec.DeepCopy() + futureTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + lease.Spec.BeginTime = &futureTime + lease.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second} + lease.Spec.EndTime = nil + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + result := reconcileLease(ctx, lease) + + // Should requeue for future time + Expect(result.RequeueAfter).To(BeNumerically(">", 0)) + Expect(result.RequeueAfter).To(BeNumerically("<=", 2*time.Second)) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should not have acquired exporter yet") + Expect(updatedLease.Status.BeginTime).To(BeNil(), "Status.BeginTime should not be set yet") + + // Poll until BeginTime passes and exporter is acquired + Eventually(func() bool { + _ = reconcileLease(ctx, lease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.ExporterRef != nil + }).WithTimeout(3*time.Second).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should have acquired exporter after BeginTime") + + Expect(updatedLease.Status.BeginTime).NotTo(BeNil(), "Status.BeginTime should be set") + Expect(updatedLease.Status.BeginTime.Time).To(BeTemporally(">=", futureTime.Time)) + }) + }) + + When("creating lease with BeginTime + EndTime (without Duration)", func() { + It("should calculate Duration and wait until BeginTime", func() { + lease := leaseDutA2Sec.DeepCopy() + beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + endTime := metav1.NewTime(beginTime.Add(1 * time.Second)) + lease.Spec.BeginTime = &beginTime + lease.Spec.EndTime = &endTime + lease.Spec.Duration = nil + + ctx 
:= context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + + // The Duration should be calculated by LeaseFromProtobuf or validation webhook + // For now, we need to set it manually since we're creating directly via k8s client + updatedLease := getLease(ctx, lease.Name) + updatedLease.Spec.Duration = &metav1.Duration{Duration: endTime.Sub(beginTime.Time)} + Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed()) + + result := reconcileLease(ctx, updatedLease) + Expect(result.RequeueAfter).To(BeNumerically(">", 0)) + + updatedLease = getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should not have acquired exporter yet") + Expect(updatedLease.Spec.Duration.Duration).To(Equal(1 * time.Second)) + + // Poll until BeginTime passes and exporter is acquired + Eventually(func() bool { + _ = reconcileLease(ctx, updatedLease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.ExporterRef != nil + }).WithTimeout(3*time.Second).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should have acquired exporter") + + Expect(updatedLease.Status.BeginTime).NotTo(BeNil()) + }) + }) + + When("creating lease with EndTime only (immediate lease with fixed end time)", func() { + It("should acquire exporter immediately and end at EndTime", func() { + lease := leaseDutA2Sec.DeepCopy() + endTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + lease.Spec.BeginTime = nil + lease.Spec.EndTime = &endTime + lease.Spec.Duration = nil + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Should acquire exporter immediately") + Expect(updatedLease.Status.BeginTime).NotTo(BeNil(), "Status.BeginTime should be set") + Expect(updatedLease.Spec.EndTime.Time).To(Equal(endTime.Time)) + + // Poll until EndTime passes and lease ends + 
Eventually(func() bool { + _ = reconcileLease(ctx, lease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.Ended + }).WithTimeout(3*time.Second).WithPolling(50*time.Millisecond).Should(BeTrue(), "Lease should end at specified EndTime") + Expect(updatedLease.Status.EndTime).NotTo(BeNil(), "Status.EndTime should be set") + + // Check EffectiveDuration in protobuf representation + pbLease := updatedLease.ToProtobuf() + Expect(pbLease.EffectiveBeginTime).NotTo(BeNil()) + Expect(pbLease.EffectiveEndTime).NotTo(BeNil()) + Expect(pbLease.EffectiveDuration).NotTo(BeNil()) + + effectiveDuration := pbLease.EffectiveDuration.AsDuration() + actualDuration := updatedLease.Status.EndTime.Sub(updatedLease.Status.BeginTime.Time) + Expect(effectiveDuration).To(BeNumerically("~", actualDuration, 10*time.Millisecond)) + }) + }) + + When("creating lease with EndTime + Duration (calculated future BeginTime)", func() { + It("should calculate BeginTime and wait before acquiring exporter", func() { + lease := leaseDutA2Sec.DeepCopy() + endTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(2 * time.Second)) + duration := 1 * time.Second + expectedBeginTime := endTime.Add(-duration) + + lease.Spec.BeginTime = nil + lease.Spec.EndTime = &endTime + lease.Spec.Duration = &metav1.Duration{Duration: duration} + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + + // The BeginTime should be calculated by LeaseFromProtobuf or validation + updatedLease := getLease(ctx, lease.Name) + updatedLease.Spec.BeginTime = &metav1.Time{Time: expectedBeginTime} + Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed()) + + result := reconcileLease(ctx, updatedLease) + Expect(result.RequeueAfter).To(BeNumerically(">", 0)) + + updatedLease = getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should not have acquired exporter yet") + Expect(updatedLease.Spec.BeginTime.Time).To(BeTemporally("~", expectedBeginTime, 
10*time.Millisecond)) + + // Poll until calculated BeginTime passes and exporter is acquired + Eventually(func() bool { + _ = reconcileLease(ctx, updatedLease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.ExporterRef != nil + }).WithTimeout(1200*time.Millisecond).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should have acquired exporter after calculated BeginTime") + }) + + It("should start immediately when calculated BeginTime is in the past", func() { + lease := leaseDutA2Sec.DeepCopy() + // Test scenario: Explicit BeginTime in past (simulating EndTime+Duration calculation result) + // Set BeginTime well in the past to ensure it's definitely past even with delays + pastBeginTime := time.Now().Truncate(time.Second).Add(-10 * time.Second) + futureEndTime := time.Now().Truncate(time.Second).Add(20 * time.Second) + + lease.Spec.BeginTime = &metav1.Time{Time: pastBeginTime} + lease.Spec.EndTime = &metav1.Time{Time: futureEndTime} + lease.Spec.Duration = &metav1.Duration{Duration: futureEndTime.Sub(pastBeginTime)} + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + + result := reconcileLease(ctx, lease) + + // The lease should start immediately, but will requeue to check expiration at EndTime + // RequeueAfter should be approximately time until EndTime (~20 seconds) + Expect(result.RequeueAfter).To(BeNumerically(">", 15*time.Second), "Should requeue for expiration check") + Expect(result.RequeueAfter).To(BeNumerically("<=", 21*time.Second), "Requeue should be around EndTime") + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Should acquire exporter immediately") + Expect(updatedLease.Status.BeginTime).NotTo(BeNil(), "Status.BeginTime should be set") + + // Status.BeginTime should be the actual acquisition time (now), not the calculated past time + // Allow generous tolerance for CI environments with second-precision timestamps + now := 
time.Now().Truncate(time.Second) + Expect(updatedLease.Status.BeginTime.Time).To(BeTemporally(">=", now.Add(-2*time.Second))) + Expect(updatedLease.Status.BeginTime.Time).To(BeTemporally("<=", now.Add(2*time.Second))) + + // EffectiveDuration should be based on actual Status.BeginTime, not Spec.BeginTime + // Since timestamps have second precision, allow up to 1 second tolerance + pbLease := updatedLease.ToProtobuf() + Expect(pbLease.EffectiveDuration).NotTo(BeNil()) + actualDuration := pbLease.EffectiveDuration.AsDuration() + // Should be small (just acquired), allowing for second-precision truncation + Expect(actualDuration).To(BeNumerically("<=", 2*time.Second)) + Expect(actualDuration).To(BeNumerically(">=", 0)) + }) + }) + + When("creating lease with BeginTime + EndTime + Duration (all three specified)", func() { + It("should validate consistency and use the values", func() { + lease := leaseDutA2Sec.DeepCopy() + beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + duration := 1 * time.Second + endTime := metav1.NewTime(beginTime.Add(duration)) + + lease.Spec.BeginTime = &beginTime + lease.Spec.EndTime = &endTime + lease.Spec.Duration = &metav1.Duration{Duration: duration} + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + result := reconcileLease(ctx, lease) + + Expect(result.RequeueAfter).To(BeNumerically(">", 0)) + + // Poll until BeginTime passes and exporter is acquired + var updatedLease *jumpstarterdevv1alpha1.Lease + Eventually(func() bool { + _ = reconcileLease(ctx, lease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.ExporterRef != nil + }).WithTimeout(1200 * time.Millisecond).WithPolling(50 * time.Millisecond).Should(BeTrue()) + + Expect(updatedLease.Status.BeginTime).NotTo(BeNil()) + Expect(updatedLease.Spec.BeginTime.Time).To(Equal(beginTime.Time)) + Expect(updatedLease.Spec.EndTime.Time).To(Equal(endTime.Time)) + 
Expect(updatedLease.Spec.Duration.Duration).To(Equal(duration)) + }) + + It("should reject when Duration conflicts with EndTime - BeginTime", func() { + // Test through the service layer (LeaseFromProtobuf) which validates + beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + endTime := metav1.NewTime(beginTime.Add(1 * time.Second)) + conflictingDuration := 2 * time.Second // Wrong! Should be 1 second + + // Create via LeaseFromProtobuf to trigger validation + key := types.NamespacedName{Name: "test-lease", Namespace: "default"} + clientRef := corev1.LocalObjectReference{Name: testClient.Name} + + pbLease := &cpb.Lease{ + Selector: "dut=a", + } + pbLease.BeginTime = timestamppb.New(beginTime.Time) + pbLease.EndTime = timestamppb.New(endTime.Time) + pbLease.Duration = durationpb.New(conflictingDuration) + + lease, err := jumpstarterdevv1alpha1.LeaseFromProtobuf(pbLease, key, clientRef) + + // Should fail validation + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("duration conflicts")) + Expect(lease).To(BeNil()) + }) + }) + + When("creating lease with BeginTime already in the past", func() { + It("should start immediately without requeuing", func() { + lease := leaseDutA2Sec.DeepCopy() + // Set BeginTime to 2 seconds in the past to ensure it's definitely passed + nowTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(-2 * time.Second)) + lease.Spec.BeginTime = &nowTime + lease.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second} + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + result := reconcileLease(ctx, lease) + + // Should not requeue (or requeue with 0) + Expect(result.RequeueAfter).To(BeNumerically("<=", 0)) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Should acquire exporter immediately") + Expect(updatedLease.Status.BeginTime).NotTo(BeNil()) + }) + }) + + When("lease expires based on 
Spec.EndTime", func() { + It("should end the lease at EndTime even if Duration would suggest later", func() { + lease := leaseDutA2Sec.DeepCopy() + endTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + lease.Spec.EndTime = &endTime + lease.Spec.Duration = &metav1.Duration{Duration: 10 * time.Second} // Much longer than EndTime + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + + // Poll until EndTime passes and lease ends + Eventually(func() bool { + _ = reconcileLease(ctx, lease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.Ended + }).WithTimeout(3*time.Second).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should respect EndTime over Duration") + Expect(updatedLease.Status.EndTime).NotTo(BeNil()) + + // Verify EffectiveDuration is calculated correctly + pbLease := updatedLease.ToProtobuf() + Expect(pbLease.EffectiveDuration).NotTo(BeNil()) + actualDuration := updatedLease.Status.EndTime.Sub(updatedLease.Status.BeginTime.Time) + // Allow tolerance for CI environments - duration is based on second-truncated times + Expect(pbLease.EffectiveDuration.AsDuration()).To(BeNumerically("~", actualDuration, 1*time.Second)) + // Verify it's shorter than the specified Duration (10s) + Expect(pbLease.EffectiveDuration.AsDuration()).To(BeNumerically("<", 3*time.Second)) + }) + }) + + When("lease with BeginTime expires based on BeginTime + Duration", func() { + It("should end the lease at BeginTime + Duration", func() { + lease := leaseDutA2Sec.DeepCopy() + beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + duration := 1 * time.Second + lease.Spec.BeginTime = &beginTime + lease.Spec.Duration = &metav1.Duration{Duration: duration} + lease.Spec.EndTime = nil + + ctx := context.Background() + Expect(k8sClient.Create(ctx, 
lease)).To(Succeed()) + + // Poll until BeginTime passes and exporter is acquired + var updatedLease *jumpstarterdevv1alpha1.Lease + Eventually(func() bool { + _ = reconcileLease(ctx, lease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.ExporterRef != nil + }).WithTimeout(1200 * time.Millisecond).WithPolling(50 * time.Millisecond).Should(BeTrue()) + + // Poll until lease expires (Duration after BeginTime) + Eventually(func() bool { + _ = reconcileLease(ctx, lease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.Ended + }).WithTimeout(3*time.Second).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should expire at BeginTime + Duration") + Expect(updatedLease.Status.EndTime).NotTo(BeNil()) + + // Verify EffectiveDuration matches the specified duration + // Allow generous tolerance for CI environments with second-precision timestamps + pbLease := updatedLease.ToProtobuf() + Expect(pbLease.EffectiveDuration).NotTo(BeNil()) + Expect(pbLease.EffectiveDuration.AsDuration()).To(BeNumerically("~", duration, 1*time.Second)) + }) + }) + + When("lease without BeginTime expires based on Status.BeginTime + Duration", func() { + It("should end the lease at Status.BeginTime + Duration", func() { + lease := leaseDutA2Sec.DeepCopy() + lease.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second} + lease.Spec.BeginTime = nil + lease.Spec.EndTime = nil + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + Expect(updatedLease.Status.BeginTime).NotTo(BeNil()) + actualBeginTime := updatedLease.Status.BeginTime.Time + + // Poll until lease expires + Eventually(func() bool { + _ = reconcileLease(ctx, lease) + updatedLease = getLease(ctx, lease.Name) + return updatedLease.Status.Ended + }).WithTimeout(2 * time.Second).WithPolling(50 * time.Millisecond).Should(BeTrue()) 
+ Expect(updatedLease.Status.EndTime).NotTo(BeNil()) + + // Verify it expired based on Status.BeginTime + Duration + expectedExpiry := actualBeginTime.Add(1 * time.Second) + Expect(time.Now().Truncate(time.Second)).To(BeTemporally(">=", expectedExpiry)) + + // Verify EffectiveDuration is calculated correctly + // Allow generous tolerance for CI environments with second-precision timestamps + pbLease := updatedLease.ToProtobuf() + Expect(pbLease.EffectiveDuration).NotTo(BeNil()) + Expect(pbLease.EffectiveDuration.AsDuration()).To(BeNumerically("~", 1*time.Second, 1*time.Second)) + }) + }) + + When("checking EffectiveDuration on active lease", func() { + It("should calculate EffectiveDuration as current time minus Status.BeginTime", func() { + lease := leaseDutA2Sec.DeepCopy() + lease.Spec.Duration = &metav1.Duration{Duration: 10 * time.Second} // Long duration so it doesn't expire + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + Expect(updatedLease.Status.ExporterRef).NotTo(BeNil()) + Expect(updatedLease.Status.BeginTime).NotTo(BeNil()) + Expect(updatedLease.Status.EndTime).To(BeNil(), "Active lease should not have EndTime") + + // Check EffectiveDuration on active lease + beforeCheck := time.Now().Truncate(time.Second) + pbLease := updatedLease.ToProtobuf() + afterCheck := time.Now().Truncate(time.Second).Add(time.Second) + + Expect(pbLease.EffectiveBeginTime).NotTo(BeNil()) + Expect(pbLease.EffectiveEndTime).To(BeNil(), "Active lease should not have EffectiveEndTime") + Expect(pbLease.EffectiveDuration).NotTo(BeNil()) + + // EffectiveDuration should be approximately now() - BeginTime + expectedMinDuration := beforeCheck.Sub(updatedLease.Status.BeginTime.Time) + expectedMaxDuration := afterCheck.Sub(updatedLease.Status.BeginTime.Time) + actualDuration := pbLease.EffectiveDuration.AsDuration() + Expect(actualDuration).To(BeNumerically(">=", 
expectedMinDuration)) + Expect(actualDuration).To(BeNumerically("<=", expectedMaxDuration)) + }) + }) + + When("multiple leases with different BeginTimes", func() { + It("should acquire exporters at their respective BeginTimes", func() { + ctx := context.Background() + + // Immediate lease + lease1 := leaseDutA2Sec.DeepCopy() + lease1.Name = lease1Name + lease1.Spec.Duration = &metav1.Duration{Duration: 5 * time.Second} + Expect(k8sClient.Create(ctx, lease1)).To(Succeed()) + _ = reconcileLease(ctx, lease1) + + updatedLease1 := getLease(ctx, lease1Name) + Expect(updatedLease1.Status.ExporterRef).NotTo(BeNil()) + exporter1 := updatedLease1.Status.ExporterRef.Name + + // Scheduled lease 1s in future + lease2 := leaseDutA2Sec.DeepCopy() + lease2.Name = lease2Name + futureTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + lease2.Spec.BeginTime = &futureTime + lease2.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second} + Expect(k8sClient.Create(ctx, lease2)).To(Succeed()) + _ = reconcileLease(ctx, lease2) + + updatedLease2 := getLease(ctx, lease2Name) + Expect(updatedLease2.Status.ExporterRef).To(BeNil(), "Scheduled lease should wait") + + // Poll until lease2's BeginTime passes and exporter is acquired + Eventually(func() bool { + _ = reconcileLease(ctx, lease2) + updatedLease2 = getLease(ctx, lease2Name) + return updatedLease2.Status.ExporterRef != nil + }).WithTimeout(1200*time.Millisecond).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should acquire after BeginTime") + exporter2 := updatedLease2.Status.ExporterRef.Name + + // Should have acquired different exporters (both dut:a exporters) + Expect(exporter2).NotTo(Equal(exporter1)) + Expect([]string{exporter1, exporter2}).To(ConsistOf(testExporter1DutA.Name, testExporter2DutA.Name)) + }) + }) + + // Validation error tests + When("creating lease with BeginTime after EndTime", func() { + It("should reject with validation error", func() { + beginTime := 
metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + endTime := metav1.NewTime(beginTime.Add(-1 * time.Second)) // Before BeginTime! + + key := types.NamespacedName{Name: "invalid-lease", Namespace: "default"} + clientRef := corev1.LocalObjectReference{Name: testClient.Name} + + pbLease := &cpb.Lease{ + Selector: "dut=a", + BeginTime: timestamppb.New(beginTime.Time), + EndTime: timestamppb.New(endTime.Time), + // No duration provided - will calculate negative duration from BeginTime > EndTime + } + + lease, err := jumpstarterdevv1alpha1.LeaseFromProtobuf(pbLease, key, clientRef) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("duration must be positive")) + Expect(lease).To(BeNil()) + }) + }) + + When("creating lease with BeginTime but zero Duration and no EndTime", func() { + It("should reject with validation error", func() { + beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second)) + + key := types.NamespacedName{Name: "invalid-lease", Namespace: "default"} + clientRef := corev1.LocalObjectReference{Name: testClient.Name} + + pbLease := &cpb.Lease{ + Selector: "dut=a", + } + pbLease.BeginTime = timestamppb.New(beginTime.Time) + // No Duration, no EndTime + + lease, err := jumpstarterdevv1alpha1.LeaseFromProtobuf(pbLease, key, clientRef) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("duration is required")) + Expect(lease).To(BeNil()) + }) + }) + + // EndTime in the past + When("creating lease with EndTime already in the past", func() { + It("should create but expire immediately", func() { + lease := leaseDutA2Sec.DeepCopy() + pastEndTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(-500 * time.Millisecond)) + lease.Spec.EndTime = &pastEndTime + lease.Spec.Duration = nil + + ctx := context.Background() + Expect(k8sClient.Create(ctx, lease)).To(Succeed()) + _ = reconcileLease(ctx, lease) + + updatedLease := getLease(ctx, lease.Name) + // Should 
			// ... acquire exporter (or try to)
			// Then immediately expire because EndTime is in the past
			Expect(updatedLease.Status.Ended).To(BeTrue(), "Lease should be ended immediately")
			Expect(updatedLease.Status.EndTime).NotTo(BeNil())
		})
	})

	When("creating lease with BeginTime in past but EndTime in future", func() {
		It("should start immediately and run until EndTime", func() {
			lease := leaseDutA2Sec.DeepCopy()
			// Times are second-truncated because the API server stores metav1.Time
			// with second precision.
			pastBeginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(-500 * time.Millisecond))
			futureEndTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			lease.Spec.BeginTime = &pastBeginTime
			lease.Spec.EndTime = &futureEndTime
			lease.Spec.Duration = &metav1.Duration{Duration: futureEndTime.Sub(pastBeginTime.Time)}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Should acquire immediately")
			Expect(updatedLease.Status.BeginTime).NotTo(BeNil())
			Expect(updatedLease.Status.Ended).To(BeFalse(), "Should not be ended yet")

			// Poll until EndTime passes and lease ends
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.Ended
			}).WithTimeout(3*time.Second).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should expire at EndTime")
		})
	})

	// Early release scenarios
	When("releasing a scheduled lease before it starts", func() {
		It("should cancel the scheduled lease", func() {
			lease := leaseDutA2Sec.DeepCopy()
			futureTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			lease.Spec.BeginTime = &futureTime
			lease.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should not have acquired yet")
			Expect(updatedLease.Status.Ended).To(BeFalse())

			// Release before BeginTime
			updatedLease.Spec.Release = true
			Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed())
			_ = reconcileLease(ctx, updatedLease)

			updatedLease = getLease(ctx, lease.Name)
			Expect(updatedLease.Status.Ended).To(BeTrue(), "Should be cancelled/ended")
			Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should never have acquired exporter")
		})
	})

	When("releasing an active lease early", func() {
		It("should have EffectiveDuration matching actual time held", func() {
			lease := leaseDutA2Sec.DeepCopy()
			lease.Spec.Duration = &metav1.Duration{Duration: 10 * time.Second} // Long duration

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).NotTo(BeNil())
			Expect(updatedLease.Status.BeginTime).NotTo(BeNil())
			beginTime := updatedLease.Status.BeginTime.Time

			// Brief wait to ensure some time has passed
			time.Sleep(50 * time.Millisecond)

			// Release early
			updatedLease = getLease(ctx, lease.Name)
			updatedLease.Spec.Release = true
			Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed())
			_ = reconcileLease(ctx, updatedLease)

			updatedLease = getLease(ctx, lease.Name)
			Expect(updatedLease.Status.Ended).To(BeTrue())
			Expect(updatedLease.Status.EndTime).NotTo(BeNil())

			// EffectiveDuration should be actual time held, not 10 seconds
			// Allow generous tolerance for CI environments with second-precision timestamps
			pbLease := updatedLease.ToProtobuf()
			Expect(pbLease.EffectiveDuration).NotTo(BeNil())
			actualDuration := pbLease.EffectiveDuration.AsDuration()
			expectedDuration := updatedLease.Status.EndTime.Sub(beginTime)
			Expect(actualDuration).To(BeNumerically("~", expectedDuration, 1*time.Second))
			Expect(actualDuration).To(BeNumerically("<=", 2*time.Second), "Should be much less than 10s")
		})
	})

	// Boundary conditions
	When("creating lease with BeginTime very close to EndTime", func() {
		It("should work with minimal duration", func() {
			lease := leaseDutA2Sec.DeepCopy()
			beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			endTime := metav1.NewTime(beginTime.Add(1 * time.Second)) // 1 second duration
			lease.Spec.BeginTime = &beginTime
			lease.Spec.EndTime = &endTime
			lease.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())

			// Poll until BeginTime passes and exporter is acquired
			var updatedLease *jumpstarterdevv1alpha1.Lease
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.ExporterRef != nil
			}).WithTimeout(1200 * time.Millisecond).WithPolling(50 * time.Millisecond).Should(BeTrue())

			// Poll until 1-second duration expires
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.Ended
			}).WithTimeout(1200 * time.Millisecond).WithPolling(50 * time.Millisecond).Should(BeTrue())
		})
	})

	When("lease expires between reconciliation calls", func() {
		It("should be marked as ended in next reconcile", func() {
			lease := leaseDutA2Sec.DeepCopy()
			lease.Spec.Duration = &metav1.Duration{Duration: 150 * time.Millisecond}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).NotTo(BeNil())
			Expect(updatedLease.Status.Ended).To(BeFalse())

			// Poll until expiration is detected (lease duration is 150ms)
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.Ended
			}).WithTimeout(500*time.Millisecond).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should be marked as ended")
		})
	})

	// UpdateLease mutation tests
	// Note: These tests simulate what UpdateLease does via gRPC by directly
	// modifying the lease spec and calling ReconcileLeaseTimeFields
	When("updating BeginTime on a lease that has already started", func() {
		It("should be rejected in UpdateLease logic", func() {
			// This tests the validation that exists in client_service.go UpdateLease
			// We simulate it by checking the condition: ExporterRef != nil
			lease := leaseDutA2Sec.DeepCopy()
			lease.Spec.Duration = &metav1.Duration{Duration: 5 * time.Second}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Lease should be active")

			// Try to update BeginTime - this would be rejected by UpdateLease
			// We verify the precondition that UpdateLease checks
			Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Cannot update BeginTime after lease starts")
		})
	})

	When("updating EndTime on a scheduled lease before it starts", func() {
		It("should update EndTime and recalculate Duration", func() {
			lease := leaseDutA2Sec.DeepCopy()
			beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			endTime := metav1.NewTime(beginTime.Add(1 * time.Second))
			lease.Spec.BeginTime = &beginTime
			lease.Spec.EndTime = &endTime
			lease.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should not have started yet")

			// Update EndTime (simulating UpdateLease behavior)
			newEndTime := metav1.NewTime(beginTime.Add(2 * time.Second))
			updatedLease.Spec.EndTime = &newEndTime
			// Clear Duration so it gets recalculated
			updatedLease.Spec.Duration = nil

			// Recalculate (this is what UpdateLease does)
			err := jumpstarterdevv1alpha1.ReconcileLeaseTimeFields(
				&updatedLease.Spec.BeginTime,
				&updatedLease.Spec.EndTime,
				&updatedLease.Spec.Duration,
			)
			Expect(err).NotTo(HaveOccurred())

			// Duration should be recalculated
			Expect(updatedLease.Spec.Duration.Duration).To(Equal(2 * time.Second))
			Expect(updatedLease.Spec.EndTime.Time).To(Equal(newEndTime.Time))
		})
	})

	When("extending an active lease by updating EndTime", func() {
		It("should extend the lease duration", func() {
			lease := leaseDutA2Sec.DeepCopy()
			endTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			lease.Spec.EndTime = &endTime
			lease.Spec.Duration = nil

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Should be active")
			Expect(updatedLease.Status.Ended).To(BeFalse())

			// Extend EndTime to 2 seconds from now
			newEndTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(2 * time.Second))
			updatedLease.Spec.EndTime = &newEndTime
			Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed())

			// Verify lease is still active after extension
			_ = reconcileLease(ctx, lease)
			updatedLease = getLease(ctx, lease.Name)
			Expect(updatedLease.Status.Ended).To(BeFalse(), "Should not expire yet due to extension")

			// Poll until new EndTime passes and lease ends
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.Ended
			}).WithTimeout(2200*time.Millisecond).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should expire at new EndTime")
		})
	})

	When("shortening an active lease by updating Duration", func() {
		It("should shorten the lease duration", func() {
			lease := leaseDutA2Sec.DeepCopy()
			lease.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Should be active")
			Expect(updatedLease.Status.BeginTime).NotTo(BeNil())

			// Shorten to 200ms total duration
			updatedLease.Spec.Duration = &metav1.Duration{Duration: 200 * time.Millisecond}
			Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed())

			// Poll until lease expires after shortened duration
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.Ended
			}).WithTimeout(500*time.Millisecond).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should expire after shortened duration")
		})
	})

	When("updating scheduled lease EndTime before it starts", func() {
		It("should allow update and adjust timing", func() {
			lease := leaseDutA2Sec.DeepCopy()
			beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			endTime := metav1.NewTime(beginTime.Add(10 * time.Second)) // Very long lease initially
			lease.Spec.BeginTime = &beginTime
			lease.Spec.EndTime = &endTime
			lease.Spec.Duration = &metav1.Duration{Duration: 10 * time.Second}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should not have started")

			// Shorten EndTime significantly
			newEndTime := metav1.NewTime(beginTime.Add(1 * time.Second))
			updatedLease.Spec.EndTime = &newEndTime
			// Clear Duration so it gets recalculated
			updatedLease.Spec.Duration = nil

			// Recalculate Duration (simulating UpdateLease)
			err := jumpstarterdevv1alpha1.ReconcileLeaseTimeFields(
				&updatedLease.Spec.BeginTime,
				&updatedLease.Spec.EndTime,
				&updatedLease.Spec.Duration,
			)
			Expect(err).NotTo(HaveOccurred())
			Expect(updatedLease.Spec.Duration.Duration).To(Equal(1 * time.Second))

			Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed())

			// Poll until BeginTime passes and exporter is acquired
			Eventually(func() bool {
				_ = reconcileLease(ctx, updatedLease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.ExporterRef != nil
			}).WithTimeout(1200 * time.Millisecond).WithPolling(50 * time.Millisecond).Should(BeTrue())

			// Poll until lease expires at new (shortened) EndTime (1s duration)
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.Ended
			}).WithTimeout(1200 * time.Millisecond).WithPolling(50 * time.Millisecond).Should(BeTrue())
		})
	})

	When("updating a lease with all three fields to maintain consistency", func() {
		It("should allow valid updates", func() {
			lease := leaseDutA2Sec.DeepCopy()
			beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			duration := 500 * time.Millisecond
			endTime := metav1.NewTime(beginTime.Add(duration))

			lease.Spec.BeginTime = &beginTime
			lease.Spec.EndTime = &endTime
			lease.Spec.Duration = &metav1.Duration{Duration: duration}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should not have started yet")

			// Update all three fields consistently
			newDuration := 800 * time.Millisecond
			newEndTime := metav1.NewTime(beginTime.Add(newDuration))
			updatedLease.Spec.Duration = &metav1.Duration{Duration: newDuration}
			updatedLease.Spec.EndTime = &newEndTime

			// Validate consistency (simulating UpdateLease)
			err := jumpstarterdevv1alpha1.ReconcileLeaseTimeFields(
				&updatedLease.Spec.BeginTime,
				&updatedLease.Spec.EndTime,
				&updatedLease.Spec.Duration,
			)
			Expect(err).NotTo(HaveOccurred(), "Consistent update should succeed")
			Expect(updatedLease.Spec.Duration.Duration).To(Equal(newDuration))
			Expect(updatedLease.Spec.EndTime.Time).To(Equal(newEndTime.Time))
		})
	})

	When("updating a lease with all three fields to create conflict", func() {
		It("should reject updates that break consistency", func() {
			// Start with consistent fields
			beginTimeVal := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			beginTime := &beginTimeVal
			duration := 500 * time.Millisecond
			endTimeVal := metav1.NewTime(beginTimeVal.Add(duration))
			endTime := &endTimeVal

			// Try to update Duration to conflict with BeginTime and EndTime
			conflictingDuration := &metav1.Duration{Duration: 1 * time.Second} // Wrong! EndTime-BeginTime = 500ms

			// Simulate UpdateLease validation
			err := jumpstarterdevv1alpha1.ReconcileLeaseTimeFields(
				&beginTime,
				&endTime,
				&conflictingDuration,
			)

			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("duration conflicts"))
		})
	})

	When("updating active lease Duration when all three fields exist", func() {
		It("should require updating both Duration and EndTime to keep them consistent", func() {
			lease := leaseDutA2Sec.DeepCopy()
			beginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			duration := 10 * time.Second // Long duration initially
			endTime := metav1.NewTime(beginTime.Add(duration))

			lease.Spec.BeginTime = &beginTime
			lease.Spec.EndTime = &endTime
			lease.Spec.Duration = &metav1.Duration{Duration: duration}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())

			// Poll until lease starts
			var updatedLease *jumpstarterdevv1alpha1.Lease
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.ExporterRef != nil
			}).WithTimeout(1200*time.Millisecond).WithPolling(50*time.Millisecond).Should(BeTrue(), "Should have started")

			// Shorten the lease: Update both Duration AND EndTime together (must stay consistent)
			newDuration := 800 * time.Millisecond
			updatedLease.Spec.Duration = &metav1.Duration{Duration: newDuration}
			newEndTime := metav1.NewTime(beginTime.Add(newDuration))
			updatedLease.Spec.EndTime = &newEndTime

			// Validate the updated fields (should pass since all three are consistent)
			err := jumpstarterdevv1alpha1.ReconcileLeaseTimeFields(
				&updatedLease.Spec.BeginTime,
				&updatedLease.Spec.EndTime,
				&updatedLease.Spec.Duration,
			)
			Expect(err).NotTo(HaveOccurred())

			Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed())

			// Poll until lease expires at new EndTime (800ms duration)
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease)
				updatedLease = getLease(ctx, lease.Name)
				return updatedLease.Status.Ended
			}).WithTimeout(1500 * time.Millisecond).WithPolling(50 * time.Millisecond).Should(BeTrue())
		})
	})

	// Additional edge cases
	When("two scheduled leases compete for the same exporter", func() {
		It("should acquire first lease at BeginTime, then second after first is released", func() {
			ctx := context.Background()

			// Give lease1 an earlier BeginTime to ensure deterministic ordering
			// Stagger them closely so both BeginTimes will have passed by the time we check lease2
			lease1BeginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			lease2BeginTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1*time.Second + 100*time.Millisecond))

			// Both leases target dut:b (only one exporter available)
			lease1 := leaseDutA2Sec.DeepCopy()
			lease1.Name = lease1Name
			lease1.Spec.Selector.MatchLabels["dut"] = "b"
			lease1.Spec.BeginTime = &lease1BeginTime
			lease1.Spec.Duration = &metav1.Duration{Duration: 10 * time.Second} // Long duration, but we'll release early

			lease2 := leaseDutA2Sec.DeepCopy()
			lease2.Name = lease2Name
			lease2.Spec.Selector.MatchLabels["dut"] = "b"
			lease2.Spec.BeginTime = &lease2BeginTime
			lease2.Spec.Duration = &metav1.Duration{Duration: 10 * time.Second}

			Expect(k8sClient.Create(ctx, lease1)).To(Succeed())
			Expect(k8sClient.Create(ctx, lease2)).To(Succeed())

			// Both should be waiting
			_ = reconcileLease(ctx, lease1)
			_ = reconcileLease(ctx, lease2)

			updatedLease1 := getLease(ctx, lease1Name)
			updatedLease2 := getLease(ctx, lease2Name)
			Expect(updatedLease1.Status.ExporterRef).To(BeNil())
			Expect(updatedLease2.Status.ExporterRef).To(BeNil())

			// Poll until lease1's BeginTime passes and it acquires exporter
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease1)
				_ = reconcileLease(ctx, lease2)
				updatedLease1 = getLease(ctx, lease1Name)
				return updatedLease1.Status.ExporterRef != nil
			}).WithTimeout(2*time.Second).WithPolling(50*time.Millisecond).Should(BeTrue(), "lease1 should acquire exporter")

			updatedLease2 = getLease(ctx, lease2Name)
			Expect(updatedLease2.Status.ExporterRef).To(BeNil(), "lease2 should still be waiting")

			// Explicitly release lease1
			updatedLease1 = getLease(ctx, lease1Name)
			updatedLease1.Spec.Release = true
			Expect(k8sClient.Update(ctx, updatedLease1)).To(Succeed())

			// Poll until lease1 is released and lease2 acquires exporter immediately
			Eventually(func() bool {
				_ = reconcileLease(ctx, lease1)
				_ = reconcileLease(ctx, lease2)
				updatedLease1 = getLease(ctx, lease1Name)
				updatedLease2 = getLease(ctx, lease2Name)
				return updatedLease1.Status.Ended && updatedLease2.Status.ExporterRef != nil
			}).WithTimeout(1500*time.Millisecond).WithPolling(50*time.Millisecond).Should(BeTrue(), "lease1 should be released and lease2 should acquire exporter immediately")
		})
	})

	When("deleting a scheduled lease before it starts", func() {
		It("should delete successfully without acquiring exporter", func() {
			lease := leaseDutA2Sec.DeepCopy()
			futureTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(5 * time.Second))
			lease.Spec.BeginTime = &futureTime
			lease.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should not have acquired yet")

			// Delete before BeginTime
			Expect(k8sClient.Delete(ctx, updatedLease)).To(Succeed())

			// Verify it's deleted
			err := k8sClient.Get(ctx, types.NamespacedName{
				Name:      lease.Name,
				Namespace: "default",
			}, &jumpstarterdevv1alpha1.Lease{})
			Expect(err).To(HaveOccurred(), "Lease should be deleted")
		})
	})

	When("updating scheduled lease to make BeginTime in the past", func() {
		It("should start immediately after update", func() {
			lease := leaseDutA2Sec.DeepCopy()
			futureTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(5 * time.Second))
			lease.Spec.BeginTime = &futureTime
			lease.Spec.Duration = &metav1.Duration{Duration: 1 * time.Second}

			ctx := context.Background()
			Expect(k8sClient.Create(ctx, lease)).To(Succeed())
			_ = reconcileLease(ctx, lease)

			updatedLease := getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).To(BeNil(), "Should not have started yet")

			// Update BeginTime to be in the past
			pastTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(-100 * time.Millisecond))
			updatedLease.Spec.BeginTime = &pastTime
			Expect(k8sClient.Update(ctx, updatedLease)).To(Succeed())

			// Should acquire immediately now
			_ = reconcileLease(ctx, updatedLease)

			updatedLease = getLease(ctx, lease.Name)
			Expect(updatedLease.Status.ExporterRef).NotTo(BeNil(), "Should acquire immediately after BeginTime moved to past")
			Expect(updatedLease.Status.BeginTime).NotTo(BeNil())

			// Verify that actual BeginTime is before the original futureTime (started early)
			Expect(updatedLease.Status.BeginTime.Time).To(BeTemporally("<", futureTime.Time), "Should have started before the original scheduled time")
		})
	})

	When("creating lease with negative Duration", func() {
		It("should reject with validation error", func() {
			key := types.NamespacedName{Name: "invalid-lease", Namespace: "default"}
			clientRef := corev1.LocalObjectReference{Name: testClient.Name}

			pbLease := &cpb.Lease{
				Selector: "dut=a",
			}
			pbLease.Duration = durationpb.New(-1 * time.Second) // Negative!

			lease, err := jumpstarterdevv1alpha1.LeaseFromProtobuf(pbLease, key, clientRef)

			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("duration must be positive"))
			Expect(lease).To(BeNil())
		})
	})

	When("creating lease with EndTime and negative Duration", func() {
		It("should reject with validation error", func() {
			key := types.NamespacedName{Name: "invalid-lease-2", Namespace: "default"}
			clientRef := corev1.LocalObjectReference{Name: testClient.Name}

			endTime := metav1.NewTime(time.Now().Truncate(time.Second).Add(1 * time.Second))
			pbLease := &cpb.Lease{
				Selector: "dut=a",
				EndTime:  timestamppb.New(endTime.Time),
			}
			pbLease.Duration = durationpb.New(-2 * time.Second) // Negative!
+ + lease, err := jumpstarterdevv1alpha1.LeaseFromProtobuf(pbLease, key, clientRef) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("duration must be positive")) + Expect(lease).To(BeNil()) + }) + }) +}) diff --git a/controller/internal/controller/secret_helpers.go b/controller/internal/controller/secret_helpers.go new file mode 100644 index 000000000..658dbf02b --- /dev/null +++ b/controller/internal/controller/secret_helpers.go @@ -0,0 +1,85 @@ +package controller + +import ( + "context" + + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +const TokenKey string = "token" + +func ensureSecret( + ctx context.Context, + key client.ObjectKey, + kclient client.Client, + scheme *runtime.Scheme, + signer *oidc.Signer, + subject string, + owner metav1.Object, +) (*corev1.Secret, error) { + logger := log.FromContext(ctx).WithName("ensureSecret") + var secret corev1.Secret + if err := kclient.Get(ctx, key, &secret); err != nil { + if !errors.IsNotFound(err) { + logger.Error(err, "failed to get secret") + return nil, err + } + // Secret not present + logger.Info("secret not present, creating") + token, err := signer.Token(subject) + if err != nil { + logger.Error(err, "failed to sign token") + return nil, err + } + secret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: key.Namespace, + Name: key.Name, + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + TokenKey: []byte(token), + }, + } + if err := controllerutil.SetControllerReference(owner, &secret, scheme); err != nil { + logger.Error(err, "failed to set controller reference") + return nil, err + } + if err = kclient.Create(ctx, &secret); 
err != nil { + logger.Error(err, "failed to create secret") + return nil, err + } + return &secret, nil + } else { + original := client.MergeFrom(secret.DeepCopy()) + if err := controllerutil.SetControllerReference(owner, &secret, scheme); err != nil { + logger.Error(err, "failed to set controller reference") + return nil, err + } + token, ok := secret.Data[TokenKey] + if !ok || signer.Validate(string(token)) != nil { + // Secret present but invalid + logger.Info("secret present but invalid, updating") + token, err := signer.Token(subject) + if err != nil { + logger.Error(err, "failed to sign token") + return nil, err + } + secret.Data = map[string][]byte{ + TokenKey: []byte(token), + } + } + if err = kclient.Patch(ctx, &secret, original); err != nil { + logger.Error(err, "failed to update secret") + return nil, err + } + return &secret, nil + } +} diff --git a/controller/internal/controller/suite_test.go b/controller/internal/controller/suite_test.go new file mode 100644 index 000000000..85ddbfd99 --- /dev/null +++ b/controller/internal/controller/suite_test.go @@ -0,0 +1,156 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "os" + "path/filepath" + "runtime" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
// cfg holds the REST config for the envtest API server; set in BeforeSuite.
var cfg *rest.Config

// k8sClient is the shared controller-runtime client used by all specs.
var k8sClient client.Client

// testEnv hosts a local API server with the project CRDs installed.
var testEnv *envtest.Environment

// TestControllers is the standard Go test entry point that hands control to Ginkgo.
func TestControllers(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	// Dummy controller signing key for the test run.
	_ = os.Setenv("CONTROLLER_KEY", "somekey")

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "deploy", "helm", "jumpstarter", "crds")},
		ErrorIfCRDPathMissing: true,

		// The BinaryAssetsDirectory is only required if you want to run the tests directly
		// without call the makefile target test. If not informed it will look for the
		// default path defined in controller-runtime which is /usr/local/kubebuilder/.
		// Note that you must have the required binaries setup under the bin directory to perform
		// the tests directly. When we run make test it will be setup and used automatically.
		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
			fmt.Sprintf("1.30.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
	}

	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = jumpstarterdevv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	// +kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())

	createTestClients(context.Background())

})

var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})

// testClient is a shared Client fixture created once for the whole suite.
var testClient = &jumpstarterdevv1alpha1.Client{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "client",
		Namespace: "default",
		Labels: map[string]string{
			"name": "client",
		},
	},
}

// createTestClients registers the shared test Client object with the API server.
func createTestClients(ctx context.Context) {
	Expect(k8sClient.Create(ctx, testClient)).To(Succeed())
}

// createExporters creates each given Exporter and runs one reconcile pass over
// it so its status/credentials are initialized before specs use it.
func createExporters(ctx context.Context, exporters ...*jumpstarterdevv1alpha1.Exporter) {
	for _, exporter := range exporters {
		// we need to DeepCopy, otherwise our object gets assigned a resource version etc.
		Expect(k8sClient.Create(ctx, exporter.DeepCopy())).To(Succeed())

		// reconcile the exporters
		typeNamespacedName := types.NamespacedName{
			Name:      exporter.Name,
			Namespace: "default", // TODO(user):Modify as needed
		}

		// Deterministic signer (empty seed) so tokens are stable across runs.
		signer, err := oidc.NewSignerFromSeed([]byte{}, "https://example.com", "dummy")
		Expect(err).NotTo(HaveOccurred())

		controllerReconciler := &ExporterReconciler{
			Client: k8sClient,
			Scheme: k8sClient.Scheme(),
			Signer: signer,
		}

		_, err = controllerReconciler.Reconcile(ctx, reconcile.Request{
			NamespacedName: typeNamespacedName,
		})
		Expect(err).NotTo(HaveOccurred())

	}
}

// deleteExporters deletes each given Exporter and its token secret.
// NOTE(review): assumes the reconciler names the secret "<exporter>-exporter"
// in the "default" namespace — confirm against ExporterReconciler.
func deleteExporters(ctx context.Context, exporters ...*jumpstarterdevv1alpha1.Exporter) {
	for _, exporter := range exporters {
		Expect(k8sClient.Delete(ctx, exporter)).To(Succeed())
		secret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      exporter.Name + "-exporter",
				Namespace: "default",
			},
		}
		Expect(k8sClient.Delete(ctx, secret)).To(Succeed())
	}
}
+*/ + +package log + +import ( + "github.com/go-logr/logr" +) + +// Log levels for consistent verbosity across the codebase +// These levels follow the logr convention where higher numbers indicate more verbosity +const ( + // LevelError represents error level logging (level 0, always shown) + LevelError = 0 + + // LevelWarning represents warning level logging (level 1) + LevelWarning = 1 + + // LevelInfo represents info level logging (level 2) + LevelInfo = 2 + + // LevelDebug represents debug level logging (level 3) + LevelDebug = 3 + + // LevelTrace represents trace level logging (level 4) + LevelTrace = 4 + + // LevelVerbose represents very verbose trace logging (level 5) + LevelVerbose = 5 +) + +// WithLevel returns a logger with the specified verbosity level +func WithLevel(logger logr.Logger, level int) logr.Logger { + return logger.V(level) +} + +// Error logs an error message (always shown) +func Error(logger logr.Logger, err error, msg string, keysAndValues ...interface{}) { + logger.Error(err, msg, keysAndValues...) +} + +// Warning logs a warning message +func Warning(logger logr.Logger, msg string, keysAndValues ...interface{}) { + logger.V(LevelWarning).Info(msg, keysAndValues...) +} + +// Info logs an info message +func Info(logger logr.Logger, msg string, keysAndValues ...interface{}) { + logger.V(LevelInfo).Info(msg, keysAndValues...) +} + +// Debug logs a debug message +func Debug(logger logr.Logger, msg string, keysAndValues ...interface{}) { + logger.V(LevelDebug).Info(msg, keysAndValues...) +} + +// Trace logs a trace message +func Trace(logger logr.Logger, msg string, keysAndValues ...interface{}) { + logger.V(LevelTrace).Info(msg, keysAndValues...) +} + +// Verbose logs a very verbose trace message +func Verbose(logger logr.Logger, msg string, keysAndValues ...interface{}) { + logger.V(LevelVerbose).Info(msg, keysAndValues...) 
// LoadAuthenticationConfiguration decodes a serialized
// AuthenticationConfiguration, appends a JWT authenticator for the
// controller's internal signer (with the configured internal prefix,
// defaulting to "internal:"), and builds the combined token authenticator.
// It returns the authenticator and the internal username prefix.
func LoadAuthenticationConfiguration(
	ctx context.Context,
	scheme *runtime.Scheme,
	configuration []byte,
	signer *Signer,
	certificateAuthority string,
) (authenticator.Token, string, error) {
	var authenticationConfiguration jumpstarterdevv1alpha1.AuthenticationConfiguration
	// Strict decoding so unknown fields in the config are rejected.
	if err := runtime.DecodeInto(
		serializer.NewCodecFactory(scheme, serializer.EnableStrict).
			UniversalDecoder(jumpstarterdevv1alpha1.GroupVersion),
		configuration,
		&authenticationConfiguration,
	); err != nil {
		return nil, "", err
	}

	// Default the username prefix for internally-issued tokens.
	if authenticationConfiguration.Internal.Prefix == "" {
		authenticationConfiguration.Internal.Prefix = "internal:"
	}

	// Always trust tokens issued by the controller's own signer, mapping the
	// "sub" claim to a prefixed username.
	authenticationConfiguration.JWT = append(authenticationConfiguration.JWT, apiserverv1beta1.JWTAuthenticator{
		Issuer: apiserverv1beta1.Issuer{
			URL:                  signer.Issuer(),
			CertificateAuthority: certificateAuthority,
			Audiences:            []string{signer.Audience()},
		},
		ClaimMappings: apiserverv1beta1.ClaimMappings{
			Username: apiserverv1beta1.PrefixedClaimOrExpression{
				Claim:  "sub",
				Prefix: &authenticationConfiguration.Internal.Prefix,
			},
		},
	})

	authn, err := newJWTAuthenticator(
		ctx,
		scheme,
		authenticationConfiguration,
	)
	if err != nil {
		return nil, "", err
	}
	return authn, authenticationConfiguration.Internal.Prefix, nil
}

// newJWTAuthenticator builds one OIDC token authenticator per configured JWT
// issuer and unions them (fail-on-error semantics).
// Reference: https://github.com/kubernetes/kubernetes/blob/v1.32.1/pkg/kubeapiserver/authenticator/config.go#L244
func newJWTAuthenticator(
	ctx context.Context,
	scheme *runtime.Scheme,
	config jumpstarterdevv1alpha1.AuthenticationConfiguration,
) (authenticator.Token, error) {
	var jwtAuthenticators []authenticator.Token
	for _, jwtAuthenticator := range config.JWT {
		var oidcCAContent oidc.CAContentProvider
		if len(jwtAuthenticator.Issuer.CertificateAuthority) > 0 {
			var oidcCAError error
			// CertificateAuthority may be either a path to a CA file or inline
			// PEM content; a stat() succeeding means it is a path.
			if _, err := os.Stat(jwtAuthenticator.Issuer.CertificateAuthority); err == nil {
				oidcCAContent, oidcCAError = dynamiccertificates.NewDynamicCAContentFromFile(
					"oidc-authenticator",
					jwtAuthenticator.Issuer.CertificateAuthority,
				)
				jwtAuthenticator.Issuer.CertificateAuthority = ""
			} else {
				// NOTE(review): unlike the file branch above, the inline-PEM
				// branch leaves Issuer.CertificateAuthority populated — confirm
				// the downstream conversion/oidc.New tolerates that.
				oidcCAContent, oidcCAError = dynamiccertificates.NewStaticCAContent(
					"oidc-authenticator",
					[]byte(jwtAuthenticator.Issuer.CertificateAuthority),
				)
			}
			if oidcCAError != nil {
				return nil, oidcCAError
			}
		}
		// Convert the versioned (v1beta1) config to the internal apiserver type.
		var jwtAuthenticatorUnversioned apiserver.JWTAuthenticator
		if err := scheme.Convert(&jwtAuthenticator, &jwtAuthenticatorUnversioned, nil); err != nil {
			return nil, err
		}
		oidcAuth, err := oidc.New(ctx, oidc.Options{
			JWTAuthenticator:     jwtAuthenticatorUnversioned,
			CAContentProvider:    oidcCAContent,
			SupportedSigningAlgs: oidc.AllValidSigningAlgorithms(),
		})
		if err != nil {
			return nil, err
		}
		jwtAuthenticators = append(jwtAuthenticators, oidcAuth)
	}
	return tokenunion.NewFailOnError(jwtAuthenticators...), nil
}
any {
+	return k.privatekey.Public() // only the public key is published
+}
+
+func (k *Signer) KeySet(context.Context) ([]op.Key, error) {
+	return []op.Key{k}, nil // the signer itself is the sole published key
+}
+
+func (k *Signer) Register(group gin.IRoutes) {
+	group.GET("/.well-known/openid-configuration", func(c *gin.Context) { // minimal discovery document pointing at our JWKS
+		op.Discover(c.Writer, &oidc.DiscoveryConfiguration{
+			Issuer:  k.issuer,
+			JwksURI: k.issuer + "/jwks",
+		})
+	})
+
+	group.GET("/jwks", func(c *gin.Context) {
+		op.Keys(c.Writer, c.Request, k)
+	})
+}
+
+func (k *Signer) Validate(token string) error { // checks a token against our own key, issuer and audience
+	_, err := jwt.Parse(token, func(t *jwt.Token) (interface{}, error) {
+		return &k.privatekey.PublicKey, nil
+	},
+		jwt.WithValidMethods([]string{
+			jwt.SigningMethodES256.Alg(), // ES256 only; rejects alg confusion
+		}),
+		jwt.WithIssuer(k.issuer),
+		jwt.WithAudience(k.audience),
+	)
+	return err
+}
+
+func (k *Signer) Token(
+	subject string,
+) (string, error) {
+	return jwt.NewWithClaims(jwt.SigningMethodES256, jwt.RegisteredClaims{
+		Issuer:    k.issuer,
+		Subject:   subject,
+		Audience:  []string{k.audience},
+		IssuedAt:  jwt.NewNumericDate(time.Now()),
+		ExpiresAt: jwt.NewNumericDate(time.Now().Add(365 * 24 * time.Hour)), // FIXME: rotate keys on expiration
+	}).SignedString(k.privatekey)
+}
diff --git a/controller/internal/oidc/token.go b/controller/internal/oidc/token.go
new file mode 100644
index 000000000..4030bd1bd
--- /dev/null
+++ b/controller/internal/oidc/token.go
@@ -0,0 +1,111 @@
+package oidc
+
+import (
+	"context"
+	"fmt"
+
+	jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1"
+	"github.com/jumpstarter-dev/jumpstarter-controller/internal/authentication"
+	"github.com/jumpstarter-dev/jumpstarter-controller/internal/authorization"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// VerifyOIDCToken authenticates the credentials carried in ctx and maps the
+// authenticated user to authorization attributes; it performs no authorization.
+func VerifyOIDCToken(
+	ctx context.Context,
+	auth authentication.ContextAuthenticator,
+	attr authorization.ContextAttributesGetter,
+) (authorizer.Attributes, error) {
+	resp, ok, err := auth.AuthenticateContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if !ok {
+		return nil, fmt.Errorf("failed to authenticate token")
+	}
+
+	return attr.ContextAttributes(ctx, resp.User)
+}
+
+// VerifyClientObjectToken authenticates ctx, authorizes the request against
+// the referenced Client object, and fetches that object from the cluster.
+func VerifyClientObjectToken(
+	ctx context.Context,
+	authn authentication.ContextAuthenticator, // renamed from authz: this parameter authenticates
+	authz authorizer.Authorizer, // renamed from authn: this parameter authorizes
+	attr authorization.ContextAttributesGetter,
+	kclient client.Client,
+) (*jumpstarterdevv1alpha1.Client, error) {
+	attrs, err := VerifyOIDCToken(ctx, authn, attr)
+	if err != nil {
+		return nil, err
+	}
+
+	if attrs.GetResource() != "Client" { // NOTE(review): kind-style name — confirm ContextAttributes sets Resource to "Client", not "clients"
+		return nil, status.Errorf(codes.InvalidArgument, "object kind mismatch")
+	}
+
+	decision, _, err := authz.Authorize(ctx, attrs)
+	if err != nil {
+		return nil, err
+	}
+
+	if decision != authorizer.DecisionAllow {
+		return nil, status.Errorf(codes.PermissionDenied, "permission denied")
+	}
+
+	var clientObj jumpstarterdevv1alpha1.Client // renamed from "client", which shadowed the imported client package
+	if err = kclient.Get(ctx, types.NamespacedName{
+		Namespace: attrs.GetNamespace(),
+		Name:      attrs.GetName(),
+	}, &clientObj); err != nil {
+		return nil, err
+	}
+
+	return &clientObj, nil
+}
+
+// VerifyExporterObjectToken authenticates ctx, authorizes the request against
+// the referenced Exporter object, and fetches that object from the cluster.
+func VerifyExporterObjectToken(
+	ctx context.Context,
+	authn authentication.ContextAuthenticator, // renamed from authz: this parameter authenticates
+	authz authorizer.Authorizer, // renamed from authn: this parameter authorizes
+	attr authorization.ContextAttributesGetter,
+	kclient client.Client,
+) (*jumpstarterdevv1alpha1.Exporter, error) {
+	attrs, err := VerifyOIDCToken(ctx, authn, attr)
+	if err != nil {
+		return nil, err
+	}
+
+	if attrs.GetResource() != "Exporter" { // NOTE(review): kind-style name — confirm ContextAttributes sets Resource to "Exporter", not "exporters"
+		return nil, status.Errorf(codes.InvalidArgument, "object kind mismatch")
+	}
+
+	decision, _, err := authz.Authorize(ctx, attrs)
+	if err != nil {
+		return nil, err
+	}
+
+	if decision != authorizer.DecisionAllow {
+		return nil, status.Errorf(codes.PermissionDenied, "permission denied")
+	}
+
+	var exporter jumpstarterdevv1alpha1.Exporter
+	if err = kclient.Get(ctx, types.NamespacedName{
+		Namespace: attrs.GetNamespace(),
+		Name:      attrs.GetName(),
+	}, &exporter); err != nil {
+		return nil, err
+	}
+
+	return &exporter, nil
+}
diff --git a/controller/internal/protocol/jumpstarter/client/v1/client.pb.go b/controller/internal/protocol/jumpstarter/client/v1/client.pb.go
new file mode 100644
index 000000000..2364552e5
--- /dev/null
+++ b/controller/internal/protocol/jumpstarter/client/v1/client.pb.go
@@ -0,0 +1,905 @@
+// Copyright 2024 The Jumpstarter Authors
+// (-- api-linter: core::0215::foreign-type-reference=disabled
+// (-- api-linter: core::0192::has-comments=disabled
+// (-- api-linter: core::0191::java-package=disabled
+// (-- api-linter: core::0191::java-outer-classname=disabled
+// (-- api-linter: core::0191::java-multiple-files=disabled
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.10
+// 	protoc (unknown)
+// source: jumpstarter/client/v1/client.proto
+
+package clientv1
+
+import (
+	v1 "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/v1"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
+	unsafe "unsafe"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Exporter struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Deprecated: Marked as deprecated in jumpstarter/client/v1/client.proto. + Online bool `protobuf:"varint,3,opt,name=online,proto3" json:"online,omitempty"` + Status v1.ExporterStatus `protobuf:"varint,4,opt,name=status,proto3,enum=jumpstarter.v1.ExporterStatus" json:"status,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Exporter) Reset() { + *x = Exporter{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Exporter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Exporter) ProtoMessage() {} + +func (x *Exporter) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Exporter.ProtoReflect.Descriptor instead. +func (*Exporter) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{0} +} + +func (x *Exporter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Exporter) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Deprecated: Marked as deprecated in jumpstarter/client/v1/client.proto. 
+func (x *Exporter) GetOnline() bool { + if x != nil { + return x.Online + } + return false +} + +func (x *Exporter) GetStatus() v1.ExporterStatus { + if x != nil { + return x.Status + } + return v1.ExporterStatus(0) +} + +type Lease struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + Duration *durationpb.Duration `protobuf:"bytes,3,opt,name=duration,proto3,oneof" json:"duration,omitempty"` + EffectiveDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=effective_duration,json=effectiveDuration,proto3" json:"effective_duration,omitempty"` + BeginTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=begin_time,json=beginTime,proto3,oneof" json:"begin_time,omitempty"` + EffectiveBeginTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=effective_begin_time,json=effectiveBeginTime,proto3,oneof" json:"effective_begin_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=end_time,json=endTime,proto3,oneof" json:"end_time,omitempty"` + EffectiveEndTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=effective_end_time,json=effectiveEndTime,proto3,oneof" json:"effective_end_time,omitempty"` + Client *string `protobuf:"bytes,9,opt,name=client,proto3,oneof" json:"client,omitempty"` + Exporter *string `protobuf:"bytes,10,opt,name=exporter,proto3,oneof" json:"exporter,omitempty"` + Conditions []*v1.Condition `protobuf:"bytes,11,rep,name=conditions,proto3" json:"conditions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Lease) Reset() { + *x = Lease{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Lease) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Lease) ProtoMessage() {} + 
+func (x *Lease) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Lease.ProtoReflect.Descriptor instead. +func (*Lease) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{1} +} + +func (x *Lease) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Lease) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *Lease) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *Lease) GetEffectiveDuration() *durationpb.Duration { + if x != nil { + return x.EffectiveDuration + } + return nil +} + +func (x *Lease) GetBeginTime() *timestamppb.Timestamp { + if x != nil { + return x.BeginTime + } + return nil +} + +func (x *Lease) GetEffectiveBeginTime() *timestamppb.Timestamp { + if x != nil { + return x.EffectiveBeginTime + } + return nil +} + +func (x *Lease) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + +func (x *Lease) GetEffectiveEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EffectiveEndTime + } + return nil +} + +func (x *Lease) GetClient() string { + if x != nil && x.Client != nil { + return *x.Client + } + return "" +} + +func (x *Lease) GetExporter() string { + if x != nil && x.Exporter != nil { + return *x.Exporter + } + return "" +} + +func (x *Lease) GetConditions() []*v1.Condition { + if x != nil { + return x.Conditions + } + return nil +} + +type GetExporterRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x 
*GetExporterRequest) Reset() { + *x = GetExporterRequest{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetExporterRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetExporterRequest) ProtoMessage() {} + +func (x *GetExporterRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetExporterRequest.ProtoReflect.Descriptor instead. +func (*GetExporterRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{2} +} + +func (x *GetExporterRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type ListExportersRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListExportersRequest) Reset() { + *x = ListExportersRequest{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListExportersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListExportersRequest) ProtoMessage() {} + +func (x *ListExportersRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_jumpstarter_client_v1_client_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListExportersRequest.ProtoReflect.Descriptor instead. +func (*ListExportersRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{3} +} + +func (x *ListExportersRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListExportersRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListExportersRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListExportersRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +type ListExportersResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Exporters []*Exporter `protobuf:"bytes,1,rep,name=exporters,proto3" json:"exporters,omitempty"` + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListExportersResponse) Reset() { + *x = ListExportersResponse{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListExportersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListExportersResponse) ProtoMessage() {} + +func (x *ListExportersResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
ListExportersResponse.ProtoReflect.Descriptor instead. +func (*ListExportersResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{4} +} + +func (x *ListExportersResponse) GetExporters() []*Exporter { + if x != nil { + return x.Exporters + } + return nil +} + +func (x *ListExportersResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +type GetLeaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetLeaseRequest) Reset() { + *x = GetLeaseRequest{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetLeaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLeaseRequest) ProtoMessage() {} + +func (x *GetLeaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLeaseRequest.ProtoReflect.Descriptor instead. 
+func (*GetLeaseRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{5} +} + +func (x *GetLeaseRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type ListLeasesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + OnlyActive *bool `protobuf:"varint,5,opt,name=only_active,json=onlyActive,proto3,oneof" json:"only_active,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListLeasesRequest) Reset() { + *x = ListLeasesRequest{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListLeasesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLeasesRequest) ProtoMessage() {} + +func (x *ListLeasesRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLeasesRequest.ProtoReflect.Descriptor instead. 
+func (*ListLeasesRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{6} +} + +func (x *ListLeasesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListLeasesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListLeasesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListLeasesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListLeasesRequest) GetOnlyActive() bool { + if x != nil && x.OnlyActive != nil { + return *x.OnlyActive + } + return false +} + +type ListLeasesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Leases []*Lease `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases,omitempty"` + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListLeasesResponse) Reset() { + *x = ListLeasesResponse{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListLeasesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLeasesResponse) ProtoMessage() {} + +func (x *ListLeasesResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLeasesResponse.ProtoReflect.Descriptor instead. 
+func (*ListLeasesResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{7} +} + +func (x *ListLeasesResponse) GetLeases() []*Lease { + if x != nil { + return x.Leases + } + return nil +} + +func (x *ListLeasesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +type CreateLeaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + LeaseId string `protobuf:"bytes,2,opt,name=lease_id,json=leaseId,proto3" json:"lease_id,omitempty"` + Lease *Lease `protobuf:"bytes,3,opt,name=lease,proto3" json:"lease,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateLeaseRequest) Reset() { + *x = CreateLeaseRequest{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateLeaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateLeaseRequest) ProtoMessage() {} + +func (x *CreateLeaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateLeaseRequest.ProtoReflect.Descriptor instead. 
+func (*CreateLeaseRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{8} +} + +func (x *CreateLeaseRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateLeaseRequest) GetLeaseId() string { + if x != nil { + return x.LeaseId + } + return "" +} + +func (x *CreateLeaseRequest) GetLease() *Lease { + if x != nil { + return x.Lease + } + return nil +} + +type UpdateLeaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Lease *Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateLeaseRequest) Reset() { + *x = UpdateLeaseRequest{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateLeaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateLeaseRequest) ProtoMessage() {} + +func (x *UpdateLeaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateLeaseRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateLeaseRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{9} +} + +func (x *UpdateLeaseRequest) GetLease() *Lease { + if x != nil { + return x.Lease + } + return nil +} + +func (x *UpdateLeaseRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +type DeleteLeaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteLeaseRequest) Reset() { + *x = DeleteLeaseRequest{} + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteLeaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteLeaseRequest) ProtoMessage() {} + +func (x *DeleteLeaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_client_v1_client_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteLeaseRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteLeaseRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_client_v1_client_proto_rawDescGZIP(), []int{10} +} + +func (x *DeleteLeaseRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_jumpstarter_client_v1_client_proto protoreflect.FileDescriptor + +const file_jumpstarter_client_v1_client_proto_rawDesc = "" + + "\n" + + "\"jumpstarter/client/v1/client.proto\x12\x15jumpstarter.client.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1fjumpstarter/v1/kubernetes.proto\x1a\x1bjumpstarter/v1/common.proto\"\xe0\x02\n" + + "\bExporter\x12\x17\n" + + "\x04name\x18\x01 \x01(\tB\x03\xe0A\bR\x04name\x12C\n" + + "\x06labels\x18\x02 \x03(\v2+.jumpstarter.client.v1.Exporter.LabelsEntryR\x06labels\x12\x1d\n" + + "\x06online\x18\x03 \x01(\bB\x05\xe0A\x03\x18\x01R\x06online\x12;\n" + + "\x06status\x18\x04 \x01(\x0e2\x1e.jumpstarter.v1.ExporterStatusB\x03\xe0A\x03R\x06status\x1a9\n" + + "\vLabelsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01:_\xeaA\\\n" + + "\x18jumpstarter.dev/Exporter\x12+namespaces/{namespace}/exporters/{exporter}*\texporters2\bexporter\"\xfa\x06\n" + + "\x05Lease\x12\x17\n" + + "\x04name\x18\x01 \x01(\tB\x03\xe0A\bR\x04name\x12\"\n" + + "\bselector\x18\x02 \x01(\tB\x06\xe0A\x02\xe0A\x05R\bselector\x12:\n" + + "\bduration\x18\x03 \x01(\v2\x19.google.protobuf.DurationH\x00R\bduration\x88\x01\x01\x12M\n" + + "\x12effective_duration\x18\x04 \x01(\v2\x19.google.protobuf.DurationB\x03\xe0A\x03R\x11effectiveDuration\x12>\n" + + "\n" + + "begin_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampH\x01R\tbeginTime\x88\x01\x01\x12V\n" + + "\x14effective_begin_time\x18\x06 
\x01(\v2\x1a.google.protobuf.TimestampB\x03\xe0A\x03H\x02R\x12effectiveBeginTime\x88\x01\x01\x12:\n" + + "\bend_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampH\x03R\aendTime\x88\x01\x01\x12R\n" + + "\x12effective_end_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampB\x03\xe0A\x03H\x04R\x10effectiveEndTime\x88\x01\x01\x12;\n" + + "\x06client\x18\t \x01(\tB\x1e\xe0A\x03\xfaA\x18\n" + + "\x16jumpstarter.dev/ClientH\x05R\x06client\x88\x01\x01\x12A\n" + + "\bexporter\x18\n" + + " \x01(\tB \xe0A\x03\xfaA\x1a\n" + + "\x18jumpstarter.dev/ExporterH\x06R\bexporter\x88\x01\x01\x12>\n" + + "\n" + + "conditions\x18\v \x03(\v2\x19.jumpstarter.v1.ConditionB\x03\xe0A\x03R\n" + + "conditions:P\xeaAM\n" + + "\x15jumpstarter.dev/Lease\x12%namespaces/{namespace}/leases/{lease}*\x06leases2\x05leaseB\v\n" + + "\t_durationB\r\n" + + "\v_begin_timeB\x17\n" + + "\x15_effective_begin_timeB\v\n" + + "\t_end_timeB\x15\n" + + "\x13_effective_end_timeB\t\n" + + "\a_clientB\v\n" + + "\t_exporter\"J\n" + + "\x12GetExporterRequest\x124\n" + + "\x04name\x18\x01 \x01(\tB \xe0A\x02\xfaA\x1a\n" + + "\x18jumpstarter.dev/ExporterR\x04name\"\xb3\x01\n" + + "\x14ListExportersRequest\x128\n" + + "\x06parent\x18\x01 \x01(\tB \xe0A\x02\xfaA\x1a\x12\x18jumpstarter.dev/ExporterR\x06parent\x12 \n" + + "\tpage_size\x18\x02 \x01(\x05B\x03\xe0A\x01R\bpageSize\x12\"\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x03\xe0A\x01R\tpageToken\x12\x1b\n" + + "\x06filter\x18\x04 \x01(\tB\x03\xe0A\x01R\x06filter\"~\n" + + "\x15ListExportersResponse\x12=\n" + + "\texporters\x18\x01 \x03(\v2\x1f.jumpstarter.client.v1.ExporterR\texporters\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\tR\rnextPageToken\"D\n" + + "\x0fGetLeaseRequest\x121\n" + + "\x04name\x18\x01 \x01(\tB\x1d\xe0A\x02\xfaA\x17\n" + + "\x15jumpstarter.dev/LeaseR\x04name\"\xe8\x01\n" + + "\x11ListLeasesRequest\x125\n" + + "\x06parent\x18\x01 \x01(\tB\x1d\xe0A\x02\xfaA\x17\x12\x15jumpstarter.dev/LeaseR\x06parent\x12 \n" + + "\tpage_size\x18\x02 
\x01(\x05B\x03\xe0A\x01R\bpageSize\x12\"\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x03\xe0A\x01R\tpageToken\x12\x1b\n" + + "\x06filter\x18\x04 \x01(\tB\x03\xe0A\x01R\x06filter\x12)\n" + + "\vonly_active\x18\x05 \x01(\bB\x03\xe0A\x01H\x00R\n" + + "onlyActive\x88\x01\x01B\x0e\n" + + "\f_only_active\"r\n" + + "\x12ListLeasesResponse\x124\n" + + "\x06leases\x18\x01 \x03(\v2\x1c.jumpstarter.client.v1.LeaseR\x06leases\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\tR\rnextPageToken\"\xa4\x01\n" + + "\x12CreateLeaseRequest\x125\n" + + "\x06parent\x18\x01 \x01(\tB\x1d\xe0A\x02\xfaA\x17\x12\x15jumpstarter.dev/LeaseR\x06parent\x12\x1e\n" + + "\blease_id\x18\x02 \x01(\tB\x03\xe0A\x01R\aleaseId\x127\n" + + "\x05lease\x18\x03 \x01(\v2\x1c.jumpstarter.client.v1.LeaseB\x03\xe0A\x02R\x05lease\"\x8f\x01\n" + + "\x12UpdateLeaseRequest\x127\n" + + "\x05lease\x18\x01 \x01(\v2\x1c.jumpstarter.client.v1.LeaseB\x03\xe0A\x02R\x05lease\x12@\n" + + "\vupdate_mask\x18\x02 \x01(\v2\x1a.google.protobuf.FieldMaskB\x03\xe0A\x01R\n" + + "updateMask\"G\n" + + "\x12DeleteLeaseRequest\x121\n" + + "\x04name\x18\x01 \x01(\tB\x1d\xe0A\x02\xfaA\x17\n" + + "\x15jumpstarter.dev/LeaseR\x04name2\xa7\b\n" + + "\rClientService\x12\x8d\x01\n" + + "\vGetExporter\x12).jumpstarter.client.v1.GetExporterRequest\x1a\x1f.jumpstarter.client.v1.Exporter\"2\xdaA\x04name\x82\xd3\xe4\x93\x02%\x12#/v1/{name=namespaces/*/exporters/*}\x12\xa0\x01\n" + + "\rListExporters\x12+.jumpstarter.client.v1.ListExportersRequest\x1a,.jumpstarter.client.v1.ListExportersResponse\"4\xdaA\x06parent\x82\xd3\xe4\x93\x02%\x12#/v1/{parent=namespaces/*}/exporters\x12\x81\x01\n" + + "\bGetLease\x12&.jumpstarter.client.v1.GetLeaseRequest\x1a\x1c.jumpstarter.client.v1.Lease\"/\xdaA\x04name\x82\xd3\xe4\x93\x02\"\x12 /v1/{name=namespaces/*/leases/*}\x12\x94\x01\n" + + "\n" + + "ListLeases\x12(.jumpstarter.client.v1.ListLeasesRequest\x1a).jumpstarter.client.v1.ListLeasesResponse\"1\xdaA\x06parent\x82\xd3\xe4\x93\x02\"\x12 
/v1/{parent=namespaces/*}/leases\x12\x9f\x01\n" + + "\vCreateLease\x12).jumpstarter.client.v1.CreateLeaseRequest\x1a\x1c.jumpstarter.client.v1.Lease\"G\xdaA\x15parent,lease,lease_id\x82\xd3\xe4\x93\x02):\x05lease\" /v1/{parent=namespaces/*}/leases\x12\xa1\x01\n" + + "\vUpdateLease\x12).jumpstarter.client.v1.UpdateLeaseRequest\x1a\x1c.jumpstarter.client.v1.Lease\"I\xdaA\x11lease,update_mask\x82\xd3\xe4\x93\x02/:\x05lease2&/v1/{lease.name=namespaces/*/leases/*}\x12\x81\x01\n" + + "\vDeleteLease\x12).jumpstarter.client.v1.DeleteLeaseRequest\x1a\x16.google.protobuf.Empty\"/\xdaA\x04name\x82\xd3\xe4\x93\x02\"* /v1/{name=namespaces/*/leases/*}B\x82\x02\n" + + "\x19com.jumpstarter.client.v1B\vClientProtoP\x01Zbgithub.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/client/v1;clientv1\xa2\x02\x03JCX\xaa\x02\x15Jumpstarter.Client.V1\xca\x02\x15Jumpstarter\\Client\\V1\xe2\x02!Jumpstarter\\Client\\V1\\GPBMetadata\xea\x02\x17Jumpstarter::Client::V1b\x06proto3" + +var ( + file_jumpstarter_client_v1_client_proto_rawDescOnce sync.Once + file_jumpstarter_client_v1_client_proto_rawDescData []byte +) + +func file_jumpstarter_client_v1_client_proto_rawDescGZIP() []byte { + file_jumpstarter_client_v1_client_proto_rawDescOnce.Do(func() { + file_jumpstarter_client_v1_client_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_jumpstarter_client_v1_client_proto_rawDesc), len(file_jumpstarter_client_v1_client_proto_rawDesc))) + }) + return file_jumpstarter_client_v1_client_proto_rawDescData +} + +var file_jumpstarter_client_v1_client_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_jumpstarter_client_v1_client_proto_goTypes = []any{ + (*Exporter)(nil), // 0: jumpstarter.client.v1.Exporter + (*Lease)(nil), // 1: jumpstarter.client.v1.Lease + (*GetExporterRequest)(nil), // 2: jumpstarter.client.v1.GetExporterRequest + (*ListExportersRequest)(nil), // 3: jumpstarter.client.v1.ListExportersRequest + (*ListExportersResponse)(nil), 
// 4: jumpstarter.client.v1.ListExportersResponse + (*GetLeaseRequest)(nil), // 5: jumpstarter.client.v1.GetLeaseRequest + (*ListLeasesRequest)(nil), // 6: jumpstarter.client.v1.ListLeasesRequest + (*ListLeasesResponse)(nil), // 7: jumpstarter.client.v1.ListLeasesResponse + (*CreateLeaseRequest)(nil), // 8: jumpstarter.client.v1.CreateLeaseRequest + (*UpdateLeaseRequest)(nil), // 9: jumpstarter.client.v1.UpdateLeaseRequest + (*DeleteLeaseRequest)(nil), // 10: jumpstarter.client.v1.DeleteLeaseRequest + nil, // 11: jumpstarter.client.v1.Exporter.LabelsEntry + (v1.ExporterStatus)(0), // 12: jumpstarter.v1.ExporterStatus + (*durationpb.Duration)(nil), // 13: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 14: google.protobuf.Timestamp + (*v1.Condition)(nil), // 15: jumpstarter.v1.Condition + (*fieldmaskpb.FieldMask)(nil), // 16: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty +} +var file_jumpstarter_client_v1_client_proto_depIdxs = []int32{ + 11, // 0: jumpstarter.client.v1.Exporter.labels:type_name -> jumpstarter.client.v1.Exporter.LabelsEntry + 12, // 1: jumpstarter.client.v1.Exporter.status:type_name -> jumpstarter.v1.ExporterStatus + 13, // 2: jumpstarter.client.v1.Lease.duration:type_name -> google.protobuf.Duration + 13, // 3: jumpstarter.client.v1.Lease.effective_duration:type_name -> google.protobuf.Duration + 14, // 4: jumpstarter.client.v1.Lease.begin_time:type_name -> google.protobuf.Timestamp + 14, // 5: jumpstarter.client.v1.Lease.effective_begin_time:type_name -> google.protobuf.Timestamp + 14, // 6: jumpstarter.client.v1.Lease.end_time:type_name -> google.protobuf.Timestamp + 14, // 7: jumpstarter.client.v1.Lease.effective_end_time:type_name -> google.protobuf.Timestamp + 15, // 8: jumpstarter.client.v1.Lease.conditions:type_name -> jumpstarter.v1.Condition + 0, // 9: jumpstarter.client.v1.ListExportersResponse.exporters:type_name -> jumpstarter.client.v1.Exporter + 1, // 10: 
jumpstarter.client.v1.ListLeasesResponse.leases:type_name -> jumpstarter.client.v1.Lease + 1, // 11: jumpstarter.client.v1.CreateLeaseRequest.lease:type_name -> jumpstarter.client.v1.Lease + 1, // 12: jumpstarter.client.v1.UpdateLeaseRequest.lease:type_name -> jumpstarter.client.v1.Lease + 16, // 13: jumpstarter.client.v1.UpdateLeaseRequest.update_mask:type_name -> google.protobuf.FieldMask + 2, // 14: jumpstarter.client.v1.ClientService.GetExporter:input_type -> jumpstarter.client.v1.GetExporterRequest + 3, // 15: jumpstarter.client.v1.ClientService.ListExporters:input_type -> jumpstarter.client.v1.ListExportersRequest + 5, // 16: jumpstarter.client.v1.ClientService.GetLease:input_type -> jumpstarter.client.v1.GetLeaseRequest + 6, // 17: jumpstarter.client.v1.ClientService.ListLeases:input_type -> jumpstarter.client.v1.ListLeasesRequest + 8, // 18: jumpstarter.client.v1.ClientService.CreateLease:input_type -> jumpstarter.client.v1.CreateLeaseRequest + 9, // 19: jumpstarter.client.v1.ClientService.UpdateLease:input_type -> jumpstarter.client.v1.UpdateLeaseRequest + 10, // 20: jumpstarter.client.v1.ClientService.DeleteLease:input_type -> jumpstarter.client.v1.DeleteLeaseRequest + 0, // 21: jumpstarter.client.v1.ClientService.GetExporter:output_type -> jumpstarter.client.v1.Exporter + 4, // 22: jumpstarter.client.v1.ClientService.ListExporters:output_type -> jumpstarter.client.v1.ListExportersResponse + 1, // 23: jumpstarter.client.v1.ClientService.GetLease:output_type -> jumpstarter.client.v1.Lease + 7, // 24: jumpstarter.client.v1.ClientService.ListLeases:output_type -> jumpstarter.client.v1.ListLeasesResponse + 1, // 25: jumpstarter.client.v1.ClientService.CreateLease:output_type -> jumpstarter.client.v1.Lease + 1, // 26: jumpstarter.client.v1.ClientService.UpdateLease:output_type -> jumpstarter.client.v1.Lease + 17, // 27: jumpstarter.client.v1.ClientService.DeleteLease:output_type -> google.protobuf.Empty + 21, // [21:28] is the sub-list for method output_type + 
14, // [14:21] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_jumpstarter_client_v1_client_proto_init() } +func file_jumpstarter_client_v1_client_proto_init() { + if File_jumpstarter_client_v1_client_proto != nil { + return + } + file_jumpstarter_client_v1_client_proto_msgTypes[1].OneofWrappers = []any{} + file_jumpstarter_client_v1_client_proto_msgTypes[6].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_jumpstarter_client_v1_client_proto_rawDesc), len(file_jumpstarter_client_v1_client_proto_rawDesc)), + NumEnums: 0, + NumMessages: 12, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_jumpstarter_client_v1_client_proto_goTypes, + DependencyIndexes: file_jumpstarter_client_v1_client_proto_depIdxs, + MessageInfos: file_jumpstarter_client_v1_client_proto_msgTypes, + }.Build() + File_jumpstarter_client_v1_client_proto = out.File + file_jumpstarter_client_v1_client_proto_goTypes = nil + file_jumpstarter_client_v1_client_proto_depIdxs = nil +} diff --git a/controller/internal/protocol/jumpstarter/client/v1/client.pb.gw.go b/controller/internal/protocol/jumpstarter/client/v1/client.pb.gw.go new file mode 100644 index 000000000..d0849304d --- /dev/null +++ b/controller/internal/protocol/jumpstarter/client/v1/client.pb.gw.go @@ -0,0 +1,727 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: jumpstarter/client/v1/client.proto + +/* +Package clientv1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package clientv1 + +import ( + "context" + "errors" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var ( + _ codes.Code + _ io.Reader + _ status.Status + _ = errors.New + _ = runtime.String + _ = utilities.NewDoubleArray + _ = metadata.Join +) + +func request_ClientService_GetExporter_0(ctx context.Context, marshaler runtime.Marshaler, client ClientServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq GetExporterRequest + metadata runtime.ServerMetadata + err error + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + protoReq.Name, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + msg, err := client.GetExporter(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ClientService_GetExporter_0(ctx context.Context, marshaler runtime.Marshaler, server ClientServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq GetExporterRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + protoReq.Name, err = runtime.String(val) + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + msg, err := server.GetExporter(ctx, &protoReq) + return msg, metadata, err +} + +var filter_ClientService_ListExporters_0 = &utilities.DoubleArray{Encoding: map[string]int{"parent": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} + +func request_ClientService_ListExporters_0(ctx context.Context, marshaler runtime.Marshaler, client ClientServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListExportersRequest + metadata runtime.ServerMetadata + err error + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["parent"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "parent") + } + protoReq.Parent, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "parent", err) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClientService_ListExporters_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.ListExporters(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ClientService_ListExporters_0(ctx context.Context, marshaler runtime.Marshaler, server ClientServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListExportersRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["parent"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "parent") + } + protoReq.Parent, err = 
runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "parent", err) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClientService_ListExporters_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.ListExporters(ctx, &protoReq) + return msg, metadata, err +} + +func request_ClientService_GetLease_0(ctx context.Context, marshaler runtime.Marshaler, client ClientServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq GetLeaseRequest + metadata runtime.ServerMetadata + err error + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + protoReq.Name, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + msg, err := client.GetLease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ClientService_GetLease_0(ctx context.Context, marshaler runtime.Marshaler, server ClientServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq GetLeaseRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + protoReq.Name, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: 
%v", "name", err) + } + msg, err := server.GetLease(ctx, &protoReq) + return msg, metadata, err +} + +var filter_ClientService_ListLeases_0 = &utilities.DoubleArray{Encoding: map[string]int{"parent": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} + +func request_ClientService_ListLeases_0(ctx context.Context, marshaler runtime.Marshaler, client ClientServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListLeasesRequest + metadata runtime.ServerMetadata + err error + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["parent"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "parent") + } + protoReq.Parent, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "parent", err) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClientService_ListLeases_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.ListLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ClientService_ListLeases_0(ctx context.Context, marshaler runtime.Marshaler, server ClientServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListLeasesRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["parent"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "parent") + } + protoReq.Parent, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type 
mismatch, parameter: %s, error: %v", "parent", err) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClientService_ListLeases_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.ListLeases(ctx, &protoReq) + return msg, metadata, err +} + +var filter_ClientService_CreateLease_0 = &utilities.DoubleArray{Encoding: map[string]int{"lease": 0, "parent": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} + +func request_ClientService_CreateLease_0(ctx context.Context, marshaler runtime.Marshaler, client ClientServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq CreateLeaseRequest + metadata runtime.ServerMetadata + err error + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.Lease); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["parent"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "parent") + } + protoReq.Parent, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "parent", err) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClientService_CreateLease_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.CreateLease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func 
local_request_ClientService_CreateLease_0(ctx context.Context, marshaler runtime.Marshaler, server ClientServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq CreateLeaseRequest + metadata runtime.ServerMetadata + err error + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.Lease); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + val, ok := pathParams["parent"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "parent") + } + protoReq.Parent, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "parent", err) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClientService_CreateLease_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.CreateLease(ctx, &protoReq) + return msg, metadata, err +} + +var filter_ClientService_UpdateLease_0 = &utilities.DoubleArray{Encoding: map[string]int{"lease": 0, "name": 1}, Base: []int{1, 2, 1, 0, 0}, Check: []int{0, 1, 2, 3, 2}} + +func request_ClientService_UpdateLease_0(ctx context.Context, marshaler runtime.Marshaler, client ClientServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq UpdateLeaseRequest + metadata runtime.ServerMetadata + err error + ) + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Lease); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + if protoReq.UpdateMask == nil || len(protoReq.UpdateMask.GetPaths()) == 0 { + if fieldMask, err := runtime.FieldMaskFromRequestBody(newReader(), protoReq.Lease); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } else { + protoReq.UpdateMask = fieldMask + } + } + val, ok := pathParams["lease.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "lease.name") + } + err = runtime.PopulateFieldFromPath(&protoReq, "lease.name", val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "lease.name", err) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClientService_UpdateLease_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.UpdateLease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ClientService_UpdateLease_0(ctx context.Context, marshaler runtime.Marshaler, server ClientServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq UpdateLeaseRequest + metadata runtime.ServerMetadata + err error + ) + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Lease); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if protoReq.UpdateMask == nil || len(protoReq.UpdateMask.GetPaths()) == 0 { + if fieldMask, err := 
runtime.FieldMaskFromRequestBody(newReader(), protoReq.Lease); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } else { + protoReq.UpdateMask = fieldMask + } + } + val, ok := pathParams["lease.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "lease.name") + } + err = runtime.PopulateFieldFromPath(&protoReq, "lease.name", val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "lease.name", err) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClientService_UpdateLease_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.UpdateLease(ctx, &protoReq) + return msg, metadata, err +} + +func request_ClientService_DeleteLease_0(ctx context.Context, marshaler runtime.Marshaler, client ClientServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq DeleteLeaseRequest + metadata runtime.ServerMetadata + err error + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + protoReq.Name, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + msg, err := client.DeleteLease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ClientService_DeleteLease_0(ctx context.Context, marshaler runtime.Marshaler, server ClientServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var ( + protoReq DeleteLeaseRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + protoReq.Name, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + msg, err := server.DeleteLease(ctx, &protoReq) + return msg, metadata, err +} + +// RegisterClientServiceHandlerServer registers the http handlers for service ClientService to "mux". +// UnaryRPC :call ClientServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterClientServiceHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
+func RegisterClientServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ClientServiceServer) error { + mux.Handle(http.MethodGet, pattern_ClientService_GetExporter_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/GetExporter", runtime.WithHTTPPathPattern("/v1/{name=namespaces/*/exporters/*}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClientService_GetExporter_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_GetExporter_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_ClientService_ListExporters_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/ListExporters", runtime.WithHTTPPathPattern("/v1/{parent=namespaces/*}/exporters")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClientService_ListExporters_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_ListExporters_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_ClientService_GetLease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/GetLease", runtime.WithHTTPPathPattern("/v1/{name=namespaces/*/leases/*}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClientService_GetLease_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_GetLease_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_ClientService_ListLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/ListLeases", runtime.WithHTTPPathPattern("/v1/{parent=namespaces/*}/leases")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClientService_ListLeases_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_ListLeases_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodPost, pattern_ClientService_CreateLease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/CreateLease", runtime.WithHTTPPathPattern("/v1/{parent=namespaces/*}/leases")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClientService_CreateLease_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_CreateLease_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodPatch, pattern_ClientService_UpdateLease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/UpdateLease", runtime.WithHTTPPathPattern("/v1/{lease.name=namespaces/*/leases/*}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClientService_UpdateLease_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_UpdateLease_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodDelete, pattern_ClientService_DeleteLease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/DeleteLease", runtime.WithHTTPPathPattern("/v1/{name=namespaces/*/leases/*}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClientService_DeleteLease_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_DeleteLease_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + + return nil +} + +// RegisterClientServiceHandlerFromEndpoint is same as RegisterClientServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterClientServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.NewClient(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + return RegisterClientServiceHandler(ctx, mux, conn) +} + +// RegisterClientServiceHandler registers the http handlers for service ClientService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterClientServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterClientServiceHandlerClient(ctx, mux, NewClientServiceClient(conn)) +} + +// RegisterClientServiceHandlerClient registers the http handlers for service ClientService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ClientServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClientServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ClientServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares. 
+func RegisterClientServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ClientServiceClient) error { + mux.Handle(http.MethodGet, pattern_ClientService_GetExporter_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/GetExporter", runtime.WithHTTPPathPattern("/v1/{name=namespaces/*/exporters/*}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClientService_GetExporter_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_GetExporter_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_ClientService_ListExporters_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/ListExporters", runtime.WithHTTPPathPattern("/v1/{parent=namespaces/*}/exporters")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClientService_ListExporters_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_ListExporters_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_ClientService_GetLease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/GetLease", runtime.WithHTTPPathPattern("/v1/{name=namespaces/*/leases/*}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClientService_GetLease_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_GetLease_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_ClientService_ListLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/ListLeases", runtime.WithHTTPPathPattern("/v1/{parent=namespaces/*}/leases")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClientService_ListLeases_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_ListLeases_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodPost, pattern_ClientService_CreateLease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/CreateLease", runtime.WithHTTPPathPattern("/v1/{parent=namespaces/*}/leases")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClientService_CreateLease_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_CreateLease_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodPatch, pattern_ClientService_UpdateLease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/UpdateLease", runtime.WithHTTPPathPattern("/v1/{lease.name=namespaces/*/leases/*}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClientService_UpdateLease_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_UpdateLease_0(annotatedContext, mux, outboundMarshaler, w, req, resp, 
mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodDelete, pattern_ClientService_DeleteLease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/jumpstarter.client.v1.ClientService/DeleteLease", runtime.WithHTTPPathPattern("/v1/{name=namespaces/*/leases/*}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClientService_DeleteLease_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ClientService_DeleteLease_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + return nil +} + +var ( + pattern_ClientService_GetExporter_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 2, 2, 1, 0, 4, 4, 5, 3}, []string{"v1", "namespaces", "exporters", "name"}, "")) + pattern_ClientService_ListExporters_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 2, 5, 2, 2, 3}, []string{"v1", "namespaces", "parent", "exporters"}, "")) + pattern_ClientService_GetLease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 2, 2, 1, 0, 4, 4, 5, 3}, []string{"v1", "namespaces", "leases", "name"}, "")) + pattern_ClientService_ListLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 2, 5, 2, 2, 3}, []string{"v1", "namespaces", "parent", "leases"}, "")) + pattern_ClientService_CreateLease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 2, 5, 2, 2, 3}, []string{"v1", "namespaces", "parent", "leases"}, "")) + pattern_ClientService_UpdateLease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 2, 2, 1, 0, 4, 4, 5, 3}, []string{"v1", "namespaces", "leases", "lease.name"}, "")) + pattern_ClientService_DeleteLease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 2, 2, 1, 0, 4, 4, 5, 3}, []string{"v1", "namespaces", "leases", "name"}, "")) +) + +var ( + forward_ClientService_GetExporter_0 = runtime.ForwardResponseMessage + forward_ClientService_ListExporters_0 = runtime.ForwardResponseMessage + forward_ClientService_GetLease_0 = runtime.ForwardResponseMessage + forward_ClientService_ListLeases_0 = runtime.ForwardResponseMessage + forward_ClientService_CreateLease_0 = runtime.ForwardResponseMessage + forward_ClientService_UpdateLease_0 = runtime.ForwardResponseMessage + forward_ClientService_DeleteLease_0 = runtime.ForwardResponseMessage +) diff --git a/controller/internal/protocol/jumpstarter/client/v1/client_grpc.pb.go b/controller/internal/protocol/jumpstarter/client/v1/client_grpc.pb.go new file mode 100644 
index 000000000..e3a4a5a92 --- /dev/null +++ b/controller/internal/protocol/jumpstarter/client/v1/client_grpc.pb.go @@ -0,0 +1,357 @@ +// Copyright 2024 The Jumpstarter Authors +// (-- api-linter: core::0215::foreign-type-reference=disabled +// (-- api-linter: core::0192::has-comments=disabled +// (-- api-linter: core::0191::java-package=disabled +// (-- api-linter: core::0191::java-outer-classname=disabled +// (-- api-linter: core::0191::java-multiple-files=disabled + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: jumpstarter/client/v1/client.proto + +package clientv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + ClientService_GetExporter_FullMethodName = "/jumpstarter.client.v1.ClientService/GetExporter" + ClientService_ListExporters_FullMethodName = "/jumpstarter.client.v1.ClientService/ListExporters" + ClientService_GetLease_FullMethodName = "/jumpstarter.client.v1.ClientService/GetLease" + ClientService_ListLeases_FullMethodName = "/jumpstarter.client.v1.ClientService/ListLeases" + ClientService_CreateLease_FullMethodName = "/jumpstarter.client.v1.ClientService/CreateLease" + ClientService_UpdateLease_FullMethodName = "/jumpstarter.client.v1.ClientService/UpdateLease" + ClientService_DeleteLease_FullMethodName = "/jumpstarter.client.v1.ClientService/DeleteLease" +) + +// ClientServiceClient is the client API for ClientService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ClientServiceClient interface { + GetExporter(ctx context.Context, in *GetExporterRequest, opts ...grpc.CallOption) (*Exporter, error) + ListExporters(ctx context.Context, in *ListExportersRequest, opts ...grpc.CallOption) (*ListExportersResponse, error) + GetLease(ctx context.Context, in *GetLeaseRequest, opts ...grpc.CallOption) (*Lease, error) + ListLeases(ctx context.Context, in *ListLeasesRequest, opts ...grpc.CallOption) (*ListLeasesResponse, error) + CreateLease(ctx context.Context, in *CreateLeaseRequest, opts ...grpc.CallOption) (*Lease, error) + UpdateLease(ctx context.Context, in *UpdateLeaseRequest, opts ...grpc.CallOption) (*Lease, error) + DeleteLease(ctx context.Context, in *DeleteLeaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type clientServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewClientServiceClient(cc grpc.ClientConnInterface) ClientServiceClient { + return &clientServiceClient{cc} +} + +func (c *clientServiceClient) GetExporter(ctx context.Context, in *GetExporterRequest, opts ...grpc.CallOption) (*Exporter, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Exporter) + err := c.cc.Invoke(ctx, ClientService_GetExporter_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clientServiceClient) ListExporters(ctx context.Context, in *ListExportersRequest, opts ...grpc.CallOption) (*ListExportersResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListExportersResponse) + err := c.cc.Invoke(ctx, ClientService_ListExporters_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clientServiceClient) GetLease(ctx context.Context, in *GetLeaseRequest, opts ...grpc.CallOption) (*Lease, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Lease) + err := c.cc.Invoke(ctx, ClientService_GetLease_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clientServiceClient) ListLeases(ctx context.Context, in *ListLeasesRequest, opts ...grpc.CallOption) (*ListLeasesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListLeasesResponse) + err := c.cc.Invoke(ctx, ClientService_ListLeases_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clientServiceClient) CreateLease(ctx context.Context, in *CreateLeaseRequest, opts ...grpc.CallOption) (*Lease, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Lease) + err := c.cc.Invoke(ctx, ClientService_CreateLease_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clientServiceClient) UpdateLease(ctx context.Context, in *UpdateLeaseRequest, opts ...grpc.CallOption) (*Lease, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Lease) + err := c.cc.Invoke(ctx, ClientService_UpdateLease_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clientServiceClient) DeleteLease(ctx context.Context, in *DeleteLeaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, ClientService_DeleteLease_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// ClientServiceServer is the server API for ClientService service. +// All implementations must embed UnimplementedClientServiceServer +// for forward compatibility. +type ClientServiceServer interface { + GetExporter(context.Context, *GetExporterRequest) (*Exporter, error) + ListExporters(context.Context, *ListExportersRequest) (*ListExportersResponse, error) + GetLease(context.Context, *GetLeaseRequest) (*Lease, error) + ListLeases(context.Context, *ListLeasesRequest) (*ListLeasesResponse, error) + CreateLease(context.Context, *CreateLeaseRequest) (*Lease, error) + UpdateLease(context.Context, *UpdateLeaseRequest) (*Lease, error) + DeleteLease(context.Context, *DeleteLeaseRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedClientServiceServer() +} + +// UnimplementedClientServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedClientServiceServer struct{} + +func (UnimplementedClientServiceServer) GetExporter(context.Context, *GetExporterRequest) (*Exporter, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetExporter not implemented") +} +func (UnimplementedClientServiceServer) ListExporters(context.Context, *ListExportersRequest) (*ListExportersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListExporters not implemented") +} +func (UnimplementedClientServiceServer) GetLease(context.Context, *GetLeaseRequest) (*Lease, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetLease not implemented") +} +func (UnimplementedClientServiceServer) ListLeases(context.Context, *ListLeasesRequest) (*ListLeasesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListLeases not implemented") +} +func (UnimplementedClientServiceServer) CreateLease(context.Context, *CreateLeaseRequest) (*Lease, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateLease not implemented") +} +func (UnimplementedClientServiceServer) UpdateLease(context.Context, *UpdateLeaseRequest) (*Lease, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateLease not implemented") +} +func (UnimplementedClientServiceServer) DeleteLease(context.Context, *DeleteLeaseRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteLease not implemented") +} +func (UnimplementedClientServiceServer) mustEmbedUnimplementedClientServiceServer() {} +func (UnimplementedClientServiceServer) testEmbeddedByValue() {} + +// UnsafeClientServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ClientServiceServer will +// result in compilation errors. 
+type UnsafeClientServiceServer interface { + mustEmbedUnimplementedClientServiceServer() +} + +func RegisterClientServiceServer(s grpc.ServiceRegistrar, srv ClientServiceServer) { + // If the following call pancis, it indicates UnimplementedClientServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&ClientService_ServiceDesc, srv) +} + +func _ClientService_GetExporter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetExporterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientServiceServer).GetExporter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientService_GetExporter_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientServiceServer).GetExporter(ctx, req.(*GetExporterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClientService_ListExporters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListExportersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientServiceServer).ListExporters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientService_ListExporters_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientServiceServer).ListExporters(ctx, req.(*ListExportersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ClientService_GetLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetLeaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientServiceServer).GetLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientService_GetLease_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientServiceServer).GetLease(ctx, req.(*GetLeaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClientService_ListLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListLeasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientServiceServer).ListLeases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientService_ListLeases_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientServiceServer).ListLeases(ctx, req.(*ListLeasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClientService_CreateLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateLeaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientServiceServer).CreateLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientService_CreateLease_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientServiceServer).CreateLease(ctx, req.(*CreateLeaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ClientService_UpdateLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateLeaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientServiceServer).UpdateLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientService_UpdateLease_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientServiceServer).UpdateLease(ctx, req.(*UpdateLeaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClientService_DeleteLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteLeaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientServiceServer).DeleteLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientService_DeleteLease_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientServiceServer).DeleteLease(ctx, req.(*DeleteLeaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ClientService_ServiceDesc is the grpc.ServiceDesc for ClientService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ClientService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "jumpstarter.client.v1.ClientService", + HandlerType: (*ClientServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetExporter", + Handler: _ClientService_GetExporter_Handler, + }, + { + MethodName: "ListExporters", + Handler: _ClientService_ListExporters_Handler, + }, + { + MethodName: "GetLease", + Handler: _ClientService_GetLease_Handler, + }, + { + MethodName: "ListLeases", + Handler: _ClientService_ListLeases_Handler, + }, + { + MethodName: "CreateLease", + Handler: _ClientService_CreateLease_Handler, + }, + { + MethodName: "UpdateLease", + Handler: _ClientService_UpdateLease_Handler, + }, + { + MethodName: "DeleteLease", + Handler: _ClientService_DeleteLease_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "jumpstarter/client/v1/client.proto", +} diff --git a/controller/internal/protocol/jumpstarter/v1/common.pb.go b/controller/internal/protocol/jumpstarter/v1/common.pb.go new file mode 100644 index 000000000..1ff75334f --- /dev/null +++ b/controller/internal/protocol/jumpstarter/v1/common.pb.go @@ -0,0 +1,216 @@ +// Copyright 2024 The Jumpstarter Authors + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: jumpstarter/v1/common.proto + +package jumpstarterv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Exporter status information +type ExporterStatus int32 + +const ( + ExporterStatus_EXPORTER_STATUS_UNSPECIFIED ExporterStatus = 0 // Unspecified exporter status + ExporterStatus_EXPORTER_STATUS_OFFLINE ExporterStatus = 1 // Exporter is offline + ExporterStatus_EXPORTER_STATUS_AVAILABLE ExporterStatus = 2 // Exporter is available to be leased + ExporterStatus_EXPORTER_STATUS_BEFORE_LEASE_HOOK ExporterStatus = 3 // Exporter is executing before lease hook(s) + ExporterStatus_EXPORTER_STATUS_LEASE_READY ExporterStatus = 4 // Exporter is leased and ready to accept commands + ExporterStatus_EXPORTER_STATUS_AFTER_LEASE_HOOK ExporterStatus = 5 // Exporter is executing after lease hook(s) + ExporterStatus_EXPORTER_STATUS_BEFORE_LEASE_HOOK_FAILED ExporterStatus = 6 // Exporter before lease hook failed + ExporterStatus_EXPORTER_STATUS_AFTER_LEASE_HOOK_FAILED ExporterStatus = 7 // Exporter after lease hook failed +) + +// Enum value maps for ExporterStatus. 
+var ( + ExporterStatus_name = map[int32]string{ + 0: "EXPORTER_STATUS_UNSPECIFIED", + 1: "EXPORTER_STATUS_OFFLINE", + 2: "EXPORTER_STATUS_AVAILABLE", + 3: "EXPORTER_STATUS_BEFORE_LEASE_HOOK", + 4: "EXPORTER_STATUS_LEASE_READY", + 5: "EXPORTER_STATUS_AFTER_LEASE_HOOK", + 6: "EXPORTER_STATUS_BEFORE_LEASE_HOOK_FAILED", + 7: "EXPORTER_STATUS_AFTER_LEASE_HOOK_FAILED", + } + ExporterStatus_value = map[string]int32{ + "EXPORTER_STATUS_UNSPECIFIED": 0, + "EXPORTER_STATUS_OFFLINE": 1, + "EXPORTER_STATUS_AVAILABLE": 2, + "EXPORTER_STATUS_BEFORE_LEASE_HOOK": 3, + "EXPORTER_STATUS_LEASE_READY": 4, + "EXPORTER_STATUS_AFTER_LEASE_HOOK": 5, + "EXPORTER_STATUS_BEFORE_LEASE_HOOK_FAILED": 6, + "EXPORTER_STATUS_AFTER_LEASE_HOOK_FAILED": 7, + } +) + +func (x ExporterStatus) Enum() *ExporterStatus { + p := new(ExporterStatus) + *p = x + return p +} + +func (x ExporterStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExporterStatus) Descriptor() protoreflect.EnumDescriptor { + return file_jumpstarter_v1_common_proto_enumTypes[0].Descriptor() +} + +func (ExporterStatus) Type() protoreflect.EnumType { + return &file_jumpstarter_v1_common_proto_enumTypes[0] +} + +func (x ExporterStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ExporterStatus.Descriptor instead. 
+func (ExporterStatus) EnumDescriptor() ([]byte, []int) { + return file_jumpstarter_v1_common_proto_rawDescGZIP(), []int{0} +} + +// Source of log stream messages +type LogSource int32 + +const ( + LogSource_LOG_SOURCE_UNSPECIFIED LogSource = 0 // Unspecified log source + LogSource_LOG_SOURCE_DRIVER LogSource = 1 // Driver/device logs + LogSource_LOG_SOURCE_BEFORE_LEASE_HOOK LogSource = 2 // beforeLease hook execution logs + LogSource_LOG_SOURCE_AFTER_LEASE_HOOK LogSource = 3 // afterLease hook execution logs + LogSource_LOG_SOURCE_SYSTEM LogSource = 4 // System/exporter logs +) + +// Enum value maps for LogSource. +var ( + LogSource_name = map[int32]string{ + 0: "LOG_SOURCE_UNSPECIFIED", + 1: "LOG_SOURCE_DRIVER", + 2: "LOG_SOURCE_BEFORE_LEASE_HOOK", + 3: "LOG_SOURCE_AFTER_LEASE_HOOK", + 4: "LOG_SOURCE_SYSTEM", + } + LogSource_value = map[string]int32{ + "LOG_SOURCE_UNSPECIFIED": 0, + "LOG_SOURCE_DRIVER": 1, + "LOG_SOURCE_BEFORE_LEASE_HOOK": 2, + "LOG_SOURCE_AFTER_LEASE_HOOK": 3, + "LOG_SOURCE_SYSTEM": 4, + } +) + +func (x LogSource) Enum() *LogSource { + p := new(LogSource) + *p = x + return p +} + +func (x LogSource) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LogSource) Descriptor() protoreflect.EnumDescriptor { + return file_jumpstarter_v1_common_proto_enumTypes[1].Descriptor() +} + +func (LogSource) Type() protoreflect.EnumType { + return &file_jumpstarter_v1_common_proto_enumTypes[1] +} + +func (x LogSource) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LogSource.Descriptor instead. 
+func (LogSource) EnumDescriptor() ([]byte, []int) { + return file_jumpstarter_v1_common_proto_rawDescGZIP(), []int{1} +} + +var File_jumpstarter_v1_common_proto protoreflect.FileDescriptor + +const file_jumpstarter_v1_common_proto_rawDesc = "" + + "\n" + + "\x1bjumpstarter/v1/common.proto\x12\x0ejumpstarter.v1*\xb6\x02\n" + + "\x0eExporterStatus\x12\x1f\n" + + "\x1bEXPORTER_STATUS_UNSPECIFIED\x10\x00\x12\x1b\n" + + "\x17EXPORTER_STATUS_OFFLINE\x10\x01\x12\x1d\n" + + "\x19EXPORTER_STATUS_AVAILABLE\x10\x02\x12%\n" + + "!EXPORTER_STATUS_BEFORE_LEASE_HOOK\x10\x03\x12\x1f\n" + + "\x1bEXPORTER_STATUS_LEASE_READY\x10\x04\x12$\n" + + " EXPORTER_STATUS_AFTER_LEASE_HOOK\x10\x05\x12,\n" + + "(EXPORTER_STATUS_BEFORE_LEASE_HOOK_FAILED\x10\x06\x12+\n" + + "'EXPORTER_STATUS_AFTER_LEASE_HOOK_FAILED\x10\a*\x98\x01\n" + + "\tLogSource\x12\x1a\n" + + "\x16LOG_SOURCE_UNSPECIFIED\x10\x00\x12\x15\n" + + "\x11LOG_SOURCE_DRIVER\x10\x01\x12 \n" + + "\x1cLOG_SOURCE_BEFORE_LEASE_HOOK\x10\x02\x12\x1f\n" + + "\x1bLOG_SOURCE_AFTER_LEASE_HOOK\x10\x03\x12\x15\n" + + "\x11LOG_SOURCE_SYSTEM\x10\x04B\xdc\x01\n" + + "\x12com.jumpstarter.v1B\vCommonProtoP\x01Z`github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/v1;jumpstarterv1\xa2\x02\x03JXX\xaa\x02\x0eJumpstarter.V1\xca\x02\x0eJumpstarter\\V1\xe2\x02\x1aJumpstarter\\V1\\GPBMetadata\xea\x02\x0fJumpstarter::V1b\x06proto3" + +var ( + file_jumpstarter_v1_common_proto_rawDescOnce sync.Once + file_jumpstarter_v1_common_proto_rawDescData []byte +) + +func file_jumpstarter_v1_common_proto_rawDescGZIP() []byte { + file_jumpstarter_v1_common_proto_rawDescOnce.Do(func() { + file_jumpstarter_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_jumpstarter_v1_common_proto_rawDesc), len(file_jumpstarter_v1_common_proto_rawDesc))) + }) + return file_jumpstarter_v1_common_proto_rawDescData +} + +var file_jumpstarter_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var 
file_jumpstarter_v1_common_proto_goTypes = []any{ + (ExporterStatus)(0), // 0: jumpstarter.v1.ExporterStatus + (LogSource)(0), // 1: jumpstarter.v1.LogSource +} +var file_jumpstarter_v1_common_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_jumpstarter_v1_common_proto_init() } +func file_jumpstarter_v1_common_proto_init() { + if File_jumpstarter_v1_common_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_jumpstarter_v1_common_proto_rawDesc), len(file_jumpstarter_v1_common_proto_rawDesc)), + NumEnums: 2, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_jumpstarter_v1_common_proto_goTypes, + DependencyIndexes: file_jumpstarter_v1_common_proto_depIdxs, + EnumInfos: file_jumpstarter_v1_common_proto_enumTypes, + }.Build() + File_jumpstarter_v1_common_proto = out.File + file_jumpstarter_v1_common_proto_goTypes = nil + file_jumpstarter_v1_common_proto_depIdxs = nil +} diff --git a/controller/internal/protocol/jumpstarter/v1/jumpstarter.pb.go b/controller/internal/protocol/jumpstarter/v1/jumpstarter.pb.go new file mode 100644 index 000000000..0228f1dec --- /dev/null +++ b/controller/internal/protocol/jumpstarter/v1/jumpstarter.pb.go @@ -0,0 +1,2015 @@ +// Copyright 2024 The Jumpstarter Authors + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: jumpstarter/v1/jumpstarter.proto + +package jumpstarterv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RegisterRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // additional context: + // - token/authentication mechanism + Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // standard labels: + // jumpstarter.dev/hostname= + // jumpstarter.dev/name= + Reports []*DriverInstanceReport `protobuf:"bytes,2,rep,name=reports,proto3" json:"reports,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RegisterRequest) Reset() { + *x = RegisterRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RegisterRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegisterRequest) ProtoMessage() {} + +func (x *RegisterRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegisterRequest.ProtoReflect.Descriptor instead. +func (*RegisterRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{0} +} + +func (x *RegisterRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *RegisterRequest) GetReports() []*DriverInstanceReport { + if x != nil { + return x.Reports + } + return nil +} + +type DriverInstanceReport struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` // a unique id within the exporter + ParentUuid *string `protobuf:"bytes,2,opt,name=parent_uuid,json=parentUuid,proto3,oneof" json:"parent_uuid,omitempty"` // optional, if device has a parent device + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Description *string `protobuf:"bytes,4,opt,name=description,proto3,oneof" json:"description,omitempty"` // optional custom driver description for CLI + MethodsDescription map[string]string `protobuf:"bytes,5,rep,name=methods_description,json=methodsDescription,proto3" json:"methods_description,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // method name -> help text for CLI + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DriverInstanceReport) Reset() { + *x = DriverInstanceReport{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DriverInstanceReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DriverInstanceReport) ProtoMessage() {} + +func (x *DriverInstanceReport) ProtoReflect() protoreflect.Message { + mi := 
&file_jumpstarter_v1_jumpstarter_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DriverInstanceReport.ProtoReflect.Descriptor instead. +func (*DriverInstanceReport) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{1} +} + +func (x *DriverInstanceReport) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *DriverInstanceReport) GetParentUuid() string { + if x != nil && x.ParentUuid != nil { + return *x.ParentUuid + } + return "" +} + +func (x *DriverInstanceReport) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *DriverInstanceReport) GetDescription() string { + if x != nil && x.Description != nil { + return *x.Description + } + return "" +} + +func (x *DriverInstanceReport) GetMethodsDescription() map[string]string { + if x != nil { + return x.MethodsDescription + } + return nil +} + +type RegisterResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RegisterResponse) Reset() { + *x = RegisterResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RegisterResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegisterResponse) ProtoMessage() {} + +func (x *RegisterResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use RegisterResponse.ProtoReflect.Descriptor instead. +func (*RegisterResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{2} +} + +func (x *RegisterResponse) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type UnregisterRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnregisterRequest) Reset() { + *x = UnregisterRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UnregisterRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnregisterRequest) ProtoMessage() {} + +func (x *UnregisterRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnregisterRequest.ProtoReflect.Descriptor instead. 
+func (*UnregisterRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{3} +} + +func (x *UnregisterRequest) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type UnregisterResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnregisterResponse) Reset() { + *x = UnregisterResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UnregisterResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnregisterResponse) ProtoMessage() {} + +func (x *UnregisterResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnregisterResponse.ProtoReflect.Descriptor instead. 
+func (*UnregisterResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{4} +} + +type ListenRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + LeaseName string `protobuf:"bytes,1,opt,name=lease_name,json=leaseName,proto3" json:"lease_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListenRequest) Reset() { + *x = ListenRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenRequest) ProtoMessage() {} + +func (x *ListenRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenRequest.ProtoReflect.Descriptor instead. 
+func (*ListenRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{5} +} + +func (x *ListenRequest) GetLeaseName() string { + if x != nil { + return x.LeaseName + } + return "" +} + +type ListenResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + RouterEndpoint string `protobuf:"bytes,1,opt,name=router_endpoint,json=routerEndpoint,proto3" json:"router_endpoint,omitempty"` + RouterToken string `protobuf:"bytes,2,opt,name=router_token,json=routerToken,proto3" json:"router_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListenResponse) Reset() { + *x = ListenResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenResponse) ProtoMessage() {} + +func (x *ListenResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenResponse.ProtoReflect.Descriptor instead. 
+func (*ListenResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{6} +} + +func (x *ListenResponse) GetRouterEndpoint() string { + if x != nil { + return x.RouterEndpoint + } + return "" +} + +func (x *ListenResponse) GetRouterToken() string { + if x != nil { + return x.RouterToken + } + return "" +} + +type StatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusRequest) Reset() { + *x = StatusRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusRequest) ProtoMessage() {} + +func (x *StatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. 
+func (*StatusRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{7} +} + +type StatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Leased bool `protobuf:"varint,1,opt,name=leased,proto3" json:"leased,omitempty"` + LeaseName *string `protobuf:"bytes,2,opt,name=lease_name,json=leaseName,proto3,oneof" json:"lease_name,omitempty"` + ClientName *string `protobuf:"bytes,3,opt,name=client_name,json=clientName,proto3,oneof" json:"client_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse) Reset() { + *x = StatusResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse) ProtoMessage() {} + +func (x *StatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. 
+func (*StatusResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{8} +} + +func (x *StatusResponse) GetLeased() bool { + if x != nil { + return x.Leased + } + return false +} + +func (x *StatusResponse) GetLeaseName() string { + if x != nil && x.LeaseName != nil { + return *x.LeaseName + } + return "" +} + +func (x *StatusResponse) GetClientName() string { + if x != nil && x.ClientName != nil { + return *x.ClientName + } + return "" +} + +type DialRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + LeaseName string `protobuf:"bytes,1,opt,name=lease_name,json=leaseName,proto3" json:"lease_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DialRequest) Reset() { + *x = DialRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DialRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DialRequest) ProtoMessage() {} + +func (x *DialRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DialRequest.ProtoReflect.Descriptor instead. 
+func (*DialRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{9} +} + +func (x *DialRequest) GetLeaseName() string { + if x != nil { + return x.LeaseName + } + return "" +} + +type DialResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + RouterEndpoint string `protobuf:"bytes,1,opt,name=router_endpoint,json=routerEndpoint,proto3" json:"router_endpoint,omitempty"` + RouterToken string `protobuf:"bytes,2,opt,name=router_token,json=routerToken,proto3" json:"router_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DialResponse) Reset() { + *x = DialResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DialResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DialResponse) ProtoMessage() {} + +func (x *DialResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DialResponse.ProtoReflect.Descriptor instead. 
+func (*DialResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{10} +} + +func (x *DialResponse) GetRouterEndpoint() string { + if x != nil { + return x.RouterEndpoint + } + return "" +} + +func (x *DialResponse) GetRouterToken() string { + if x != nil { + return x.RouterToken + } + return "" +} + +type AuditStreamRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // additional context: + // - token/authentication mechanism + ExporterUuid string `protobuf:"bytes,1,opt,name=exporter_uuid,json=exporterUuid,proto3" json:"exporter_uuid,omitempty"` + DriverInstanceUuid string `protobuf:"bytes,2,opt,name=driver_instance_uuid,json=driverInstanceUuid,proto3" json:"driver_instance_uuid,omitempty"` + Severity string `protobuf:"bytes,3,opt,name=severity,proto3" json:"severity,omitempty"` + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AuditStreamRequest) Reset() { + *x = AuditStreamRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AuditStreamRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuditStreamRequest) ProtoMessage() {} + +func (x *AuditStreamRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuditStreamRequest.ProtoReflect.Descriptor instead. 
+func (*AuditStreamRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{11} +} + +func (x *AuditStreamRequest) GetExporterUuid() string { + if x != nil { + return x.ExporterUuid + } + return "" +} + +func (x *AuditStreamRequest) GetDriverInstanceUuid() string { + if x != nil { + return x.DriverInstanceUuid + } + return "" +} + +func (x *AuditStreamRequest) GetSeverity() string { + if x != nil { + return x.Severity + } + return "" +} + +func (x *AuditStreamRequest) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type ReportStatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Status ExporterStatus `protobuf:"varint,1,opt,name=status,proto3,enum=jumpstarter.v1.ExporterStatus" json:"status,omitempty"` + Message *string `protobuf:"bytes,2,opt,name=message,proto3,oneof" json:"message,omitempty"` // Optional human-readable status message + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReportStatusRequest) Reset() { + *x = ReportStatusRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReportStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReportStatusRequest) ProtoMessage() {} + +func (x *ReportStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReportStatusRequest.ProtoReflect.Descriptor instead. 
+func (*ReportStatusRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{12} +} + +func (x *ReportStatusRequest) GetStatus() ExporterStatus { + if x != nil { + return x.Status + } + return ExporterStatus_EXPORTER_STATUS_UNSPECIFIED +} + +func (x *ReportStatusRequest) GetMessage() string { + if x != nil && x.Message != nil { + return *x.Message + } + return "" +} + +type ReportStatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReportStatusResponse) Reset() { + *x = ReportStatusResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReportStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReportStatusResponse) ProtoMessage() {} + +func (x *ReportStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReportStatusResponse.ProtoReflect.Descriptor instead. 
+func (*ReportStatusResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{13} +} + +type GetReportResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // standard labels: + // jumpstarter.dev/hostname= + // jumpstarter.dev/name= + Reports []*DriverInstanceReport `protobuf:"bytes,3,rep,name=reports,proto3" json:"reports,omitempty"` + AlternativeEndpoints []*Endpoint `protobuf:"bytes,4,rep,name=alternative_endpoints,json=alternativeEndpoints,proto3" json:"alternative_endpoints,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetReportResponse) Reset() { + *x = GetReportResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetReportResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetReportResponse) ProtoMessage() {} + +func (x *GetReportResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetReportResponse.ProtoReflect.Descriptor instead. 
+func (*GetReportResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{14} +} + +func (x *GetReportResponse) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *GetReportResponse) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *GetReportResponse) GetReports() []*DriverInstanceReport { + if x != nil { + return x.Reports + } + return nil +} + +func (x *GetReportResponse) GetAlternativeEndpoints() []*Endpoint { + if x != nil { + return x.AlternativeEndpoints + } + return nil +} + +type Endpoint struct { + state protoimpl.MessageState `protogen:"open.v1"` + Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + Certificate string `protobuf:"bytes,2,opt,name=certificate,proto3" json:"certificate,omitempty"` + ClientCertificate string `protobuf:"bytes,3,opt,name=client_certificate,json=clientCertificate,proto3" json:"client_certificate,omitempty"` + ClientPrivateKey string `protobuf:"bytes,4,opt,name=client_private_key,json=clientPrivateKey,proto3" json:"client_private_key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Endpoint) Reset() { + *x = Endpoint{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Endpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint) ProtoMessage() {} + +func (x *Endpoint) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead. 
+func (*Endpoint) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{15} +} + +func (x *Endpoint) GetEndpoint() string { + if x != nil { + return x.Endpoint + } + return "" +} + +func (x *Endpoint) GetCertificate() string { + if x != nil { + return x.Certificate + } + return "" +} + +func (x *Endpoint) GetClientCertificate() string { + if x != nil { + return x.ClientCertificate + } + return "" +} + +func (x *Endpoint) GetClientPrivateKey() string { + if x != nil { + return x.ClientPrivateKey + } + return "" +} + +type DriverCallRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` + Args []*structpb.Value `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DriverCallRequest) Reset() { + *x = DriverCallRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DriverCallRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DriverCallRequest) ProtoMessage() {} + +func (x *DriverCallRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DriverCallRequest.ProtoReflect.Descriptor instead. 
+func (*DriverCallRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{16} +} + +func (x *DriverCallRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *DriverCallRequest) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *DriverCallRequest) GetArgs() []*structpb.Value { + if x != nil { + return x.Args + } + return nil +} + +type DriverCallResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Result *structpb.Value `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DriverCallResponse) Reset() { + *x = DriverCallResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DriverCallResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DriverCallResponse) ProtoMessage() {} + +func (x *DriverCallResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DriverCallResponse.ProtoReflect.Descriptor instead. 
+func (*DriverCallResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{17} +} + +func (x *DriverCallResponse) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *DriverCallResponse) GetResult() *structpb.Value { + if x != nil { + return x.Result + } + return nil +} + +type StreamingDriverCallRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` + Args []*structpb.Value `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamingDriverCallRequest) Reset() { + *x = StreamingDriverCallRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamingDriverCallRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingDriverCallRequest) ProtoMessage() {} + +func (x *StreamingDriverCallRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingDriverCallRequest.ProtoReflect.Descriptor instead. 
+func (*StreamingDriverCallRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{18} +} + +func (x *StreamingDriverCallRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *StreamingDriverCallRequest) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *StreamingDriverCallRequest) GetArgs() []*structpb.Value { + if x != nil { + return x.Args + } + return nil +} + +type StreamingDriverCallResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Result *structpb.Value `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamingDriverCallResponse) Reset() { + *x = StreamingDriverCallResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamingDriverCallResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingDriverCallResponse) ProtoMessage() {} + +func (x *StreamingDriverCallResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingDriverCallResponse.ProtoReflect.Descriptor instead. 
+func (*StreamingDriverCallResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{19} +} + +func (x *StreamingDriverCallResponse) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *StreamingDriverCallResponse) GetResult() *structpb.Value { + if x != nil { + return x.Result + } + return nil +} + +type LogStreamResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Severity string `protobuf:"bytes,2,opt,name=severity,proto3" json:"severity,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + Source *LogSource `protobuf:"varint,4,opt,name=source,proto3,enum=jumpstarter.v1.LogSource,oneof" json:"source,omitempty"` // New optional field + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LogStreamResponse) Reset() { + *x = LogStreamResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LogStreamResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogStreamResponse) ProtoMessage() {} + +func (x *LogStreamResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogStreamResponse.ProtoReflect.Descriptor instead. 
+func (*LogStreamResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{20} +} + +func (x *LogStreamResponse) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *LogStreamResponse) GetSeverity() string { + if x != nil { + return x.Severity + } + return "" +} + +func (x *LogStreamResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *LogStreamResponse) GetSource() LogSource { + if x != nil && x.Source != nil { + return *x.Source + } + return LogSource_LOG_SOURCE_UNSPECIFIED +} + +type ResetRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResetRequest) Reset() { + *x = ResetRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResetRequest) ProtoMessage() {} + +func (x *ResetRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResetRequest.ProtoReflect.Descriptor instead. 
+func (*ResetRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{21} +} + +type ResetResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResetResponse) Reset() { + *x = ResetResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResetResponse) ProtoMessage() {} + +func (x *ResetResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResetResponse.ProtoReflect.Descriptor instead. +func (*ResetResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{22} +} + +type GetLeaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetLeaseRequest) Reset() { + *x = GetLeaseRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetLeaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLeaseRequest) ProtoMessage() {} + +func (x *GetLeaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use GetLeaseRequest.ProtoReflect.Descriptor instead. +func (*GetLeaseRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{23} +} + +func (x *GetLeaseRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type GetLeaseResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Duration *durationpb.Duration `protobuf:"bytes,1,opt,name=duration,proto3" json:"duration,omitempty"` + Selector *LabelSelector `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + BeginTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=begin_time,json=beginTime,proto3,oneof" json:"begin_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3,oneof" json:"end_time,omitempty"` + ExporterUuid *string `protobuf:"bytes,5,opt,name=exporter_uuid,json=exporterUuid,proto3,oneof" json:"exporter_uuid,omitempty"` + Conditions []*Condition `protobuf:"bytes,6,rep,name=conditions,proto3" json:"conditions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetLeaseResponse) Reset() { + *x = GetLeaseResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetLeaseResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLeaseResponse) ProtoMessage() {} + +func (x *GetLeaseResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLeaseResponse.ProtoReflect.Descriptor instead. 
+func (*GetLeaseResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{24} +} + +func (x *GetLeaseResponse) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *GetLeaseResponse) GetSelector() *LabelSelector { + if x != nil { + return x.Selector + } + return nil +} + +func (x *GetLeaseResponse) GetBeginTime() *timestamppb.Timestamp { + if x != nil { + return x.BeginTime + } + return nil +} + +func (x *GetLeaseResponse) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + +func (x *GetLeaseResponse) GetExporterUuid() string { + if x != nil && x.ExporterUuid != nil { + return *x.ExporterUuid + } + return "" +} + +func (x *GetLeaseResponse) GetConditions() []*Condition { + if x != nil { + return x.Conditions + } + return nil +} + +type RequestLeaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Duration *durationpb.Duration `protobuf:"bytes,1,opt,name=duration,proto3" json:"duration,omitempty"` + Selector *LabelSelector `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestLeaseRequest) Reset() { + *x = RequestLeaseRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestLeaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestLeaseRequest) ProtoMessage() {} + +func (x *RequestLeaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestLeaseRequest.ProtoReflect.Descriptor instead. 
+func (*RequestLeaseRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{25} +} + +func (x *RequestLeaseRequest) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *RequestLeaseRequest) GetSelector() *LabelSelector { + if x != nil { + return x.Selector + } + return nil +} + +type RequestLeaseResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestLeaseResponse) Reset() { + *x = RequestLeaseResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestLeaseResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestLeaseResponse) ProtoMessage() {} + +func (x *RequestLeaseResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestLeaseResponse.ProtoReflect.Descriptor instead. 
+func (*RequestLeaseResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{26} +} + +func (x *RequestLeaseResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type ReleaseLeaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReleaseLeaseRequest) Reset() { + *x = ReleaseLeaseRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReleaseLeaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseLeaseRequest) ProtoMessage() {} + +func (x *ReleaseLeaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReleaseLeaseRequest.ProtoReflect.Descriptor instead. 
+func (*ReleaseLeaseRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{27} +} + +func (x *ReleaseLeaseRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type ReleaseLeaseResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReleaseLeaseResponse) Reset() { + *x = ReleaseLeaseResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReleaseLeaseResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseLeaseResponse) ProtoMessage() {} + +func (x *ReleaseLeaseResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReleaseLeaseResponse.ProtoReflect.Descriptor instead. 
+func (*ReleaseLeaseResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{28} +} + +type ListLeasesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListLeasesRequest) Reset() { + *x = ListLeasesRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListLeasesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLeasesRequest) ProtoMessage() {} + +func (x *ListLeasesRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLeasesRequest.ProtoReflect.Descriptor instead. 
+func (*ListLeasesRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{29} +} + +type ListLeasesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListLeasesResponse) Reset() { + *x = ListLeasesResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListLeasesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLeasesResponse) ProtoMessage() {} + +func (x *ListLeasesResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLeasesResponse.ProtoReflect.Descriptor instead. 
+func (*ListLeasesResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{30} +} + +func (x *ListLeasesResponse) GetNames() []string { + if x != nil { + return x.Names + } + return nil +} + +type GetStatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetStatusRequest) Reset() { + *x = GetStatusRequest{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetStatusRequest) ProtoMessage() {} + +func (x *GetStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetStatusRequest.ProtoReflect.Descriptor instead. 
+func (*GetStatusRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{31} +} + +type GetStatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Status ExporterStatus `protobuf:"varint,1,opt,name=status,proto3,enum=jumpstarter.v1.ExporterStatus" json:"status,omitempty"` + Message *string `protobuf:"bytes,2,opt,name=message,proto3,oneof" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetStatusResponse) Reset() { + *x = GetStatusResponse{} + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetStatusResponse) ProtoMessage() {} + +func (x *GetStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_jumpstarter_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetStatusResponse.ProtoReflect.Descriptor instead. 
+func (*GetStatusResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP(), []int{32} +} + +func (x *GetStatusResponse) GetStatus() ExporterStatus { + if x != nil { + return x.Status + } + return ExporterStatus_EXPORTER_STATUS_UNSPECIFIED +} + +func (x *GetStatusResponse) GetMessage() string { + if x != nil && x.Message != nil { + return *x.Message + } + return "" +} + +var File_jumpstarter_v1_jumpstarter_proto protoreflect.FileDescriptor + +const file_jumpstarter_v1_jumpstarter_proto_rawDesc = "" + + "\n" + + " jumpstarter/v1/jumpstarter.proto\x12\x0ejumpstarter.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1fjumpstarter/v1/kubernetes.proto\x1a\x1bjumpstarter/v1/common.proto\"\xd1\x01\n" + + "\x0fRegisterRequest\x12C\n" + + "\x06labels\x18\x01 \x03(\v2+.jumpstarter.v1.RegisterRequest.LabelsEntryR\x06labels\x12>\n" + + "\areports\x18\x02 \x03(\v2$.jumpstarter.v1.DriverInstanceReportR\areports\x1a9\n" + + "\vLabelsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xd2\x03\n" + + "\x14DriverInstanceReport\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12$\n" + + "\vparent_uuid\x18\x02 \x01(\tH\x00R\n" + + "parentUuid\x88\x01\x01\x12H\n" + + "\x06labels\x18\x03 \x03(\v20.jumpstarter.v1.DriverInstanceReport.LabelsEntryR\x06labels\x12%\n" + + "\vdescription\x18\x04 \x01(\tH\x01R\vdescription\x88\x01\x01\x12m\n" + + "\x13methods_description\x18\x05 \x03(\v2<.jumpstarter.v1.DriverInstanceReport.MethodsDescriptionEntryR\x12methodsDescription\x1a9\n" + + "\vLabelsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1aE\n" + + "\x17MethodsDescriptionEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B\x0e\n" + + 
"\f_parent_uuidB\x0e\n" + + "\f_description\"&\n" + + "\x10RegisterResponse\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\"+\n" + + "\x11UnregisterRequest\x12\x16\n" + + "\x06reason\x18\x02 \x01(\tR\x06reason\"\x14\n" + + "\x12UnregisterResponse\".\n" + + "\rListenRequest\x12\x1d\n" + + "\n" + + "lease_name\x18\x01 \x01(\tR\tleaseName\"\\\n" + + "\x0eListenResponse\x12'\n" + + "\x0frouter_endpoint\x18\x01 \x01(\tR\x0erouterEndpoint\x12!\n" + + "\frouter_token\x18\x02 \x01(\tR\vrouterToken\"\x0f\n" + + "\rStatusRequest\"\x91\x01\n" + + "\x0eStatusResponse\x12\x16\n" + + "\x06leased\x18\x01 \x01(\bR\x06leased\x12\"\n" + + "\n" + + "lease_name\x18\x02 \x01(\tH\x00R\tleaseName\x88\x01\x01\x12$\n" + + "\vclient_name\x18\x03 \x01(\tH\x01R\n" + + "clientName\x88\x01\x01B\r\n" + + "\v_lease_nameB\x0e\n" + + "\f_client_name\",\n" + + "\vDialRequest\x12\x1d\n" + + "\n" + + "lease_name\x18\x01 \x01(\tR\tleaseName\"Z\n" + + "\fDialResponse\x12'\n" + + "\x0frouter_endpoint\x18\x01 \x01(\tR\x0erouterEndpoint\x12!\n" + + "\frouter_token\x18\x02 \x01(\tR\vrouterToken\"\xa1\x01\n" + + "\x12AuditStreamRequest\x12#\n" + + "\rexporter_uuid\x18\x01 \x01(\tR\fexporterUuid\x120\n" + + "\x14driver_instance_uuid\x18\x02 \x01(\tR\x12driverInstanceUuid\x12\x1a\n" + + "\bseverity\x18\x03 \x01(\tR\bseverity\x12\x18\n" + + "\amessage\x18\x04 \x01(\tR\amessage\"x\n" + + "\x13ReportStatusRequest\x126\n" + + "\x06status\x18\x01 \x01(\x0e2\x1e.jumpstarter.v1.ExporterStatusR\x06status\x12\x1d\n" + + "\amessage\x18\x02 \x01(\tH\x00R\amessage\x88\x01\x01B\n" + + "\n" + + "\b_message\"\x16\n" + + "\x14ReportStatusResponse\"\xb8\x02\n" + + "\x11GetReportResponse\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12E\n" + + "\x06labels\x18\x02 \x03(\v2-.jumpstarter.v1.GetReportResponse.LabelsEntryR\x06labels\x12>\n" + + "\areports\x18\x03 \x03(\v2$.jumpstarter.v1.DriverInstanceReportR\areports\x12M\n" + + "\x15alternative_endpoints\x18\x04 
\x03(\v2\x18.jumpstarter.v1.EndpointR\x14alternativeEndpoints\x1a9\n" + + "\vLabelsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xa5\x01\n" + + "\bEndpoint\x12\x1a\n" + + "\bendpoint\x18\x01 \x01(\tR\bendpoint\x12 \n" + + "\vcertificate\x18\x02 \x01(\tR\vcertificate\x12-\n" + + "\x12client_certificate\x18\x03 \x01(\tR\x11clientCertificate\x12,\n" + + "\x12client_private_key\x18\x04 \x01(\tR\x10clientPrivateKey\"k\n" + + "\x11DriverCallRequest\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12\x16\n" + + "\x06method\x18\x02 \x01(\tR\x06method\x12*\n" + + "\x04args\x18\x03 \x03(\v2\x16.google.protobuf.ValueR\x04args\"X\n" + + "\x12DriverCallResponse\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12.\n" + + "\x06result\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x06result\"t\n" + + "\x1aStreamingDriverCallRequest\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12\x16\n" + + "\x06method\x18\x02 \x01(\tR\x06method\x12*\n" + + "\x04args\x18\x03 \x03(\v2\x16.google.protobuf.ValueR\x04args\"a\n" + + "\x1bStreamingDriverCallResponse\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12.\n" + + "\x06result\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x06result\"\xa0\x01\n" + + "\x11LogStreamResponse\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12\x1a\n" + + "\bseverity\x18\x02 \x01(\tR\bseverity\x12\x18\n" + + "\amessage\x18\x03 \x01(\tR\amessage\x126\n" + + "\x06source\x18\x04 \x01(\x0e2\x19.jumpstarter.v1.LogSourceH\x00R\x06source\x88\x01\x01B\t\n" + + "\a_source\"\x0e\n" + + "\fResetRequest\"\x0f\n" + + "\rResetResponse\"%\n" + + "\x0fGetLeaseRequest\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\"\x93\x03\n" + + "\x10GetLeaseResponse\x125\n" + + "\bduration\x18\x01 \x01(\v2\x19.google.protobuf.DurationR\bduration\x129\n" + + "\bselector\x18\x02 \x01(\v2\x1d.jumpstarter.v1.LabelSelectorR\bselector\x12>\n" + + "\n" + + "begin_time\x18\x03 
\x01(\v2\x1a.google.protobuf.TimestampH\x00R\tbeginTime\x88\x01\x01\x12:\n" + + "\bend_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampH\x01R\aendTime\x88\x01\x01\x12(\n" + + "\rexporter_uuid\x18\x05 \x01(\tH\x02R\fexporterUuid\x88\x01\x01\x129\n" + + "\n" + + "conditions\x18\x06 \x03(\v2\x19.jumpstarter.v1.ConditionR\n" + + "conditionsB\r\n" + + "\v_begin_timeB\v\n" + + "\t_end_timeB\x10\n" + + "\x0e_exporter_uuid\"\x87\x01\n" + + "\x13RequestLeaseRequest\x125\n" + + "\bduration\x18\x01 \x01(\v2\x19.google.protobuf.DurationR\bduration\x129\n" + + "\bselector\x18\x02 \x01(\v2\x1d.jumpstarter.v1.LabelSelectorR\bselector\"*\n" + + "\x14RequestLeaseResponse\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\")\n" + + "\x13ReleaseLeaseRequest\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\"\x16\n" + + "\x14ReleaseLeaseResponse\"\x13\n" + + "\x11ListLeasesRequest\"*\n" + + "\x12ListLeasesResponse\x12\x14\n" + + "\x05names\x18\x01 \x03(\tR\x05names\"\x12\n" + + "\x10GetStatusRequest\"v\n" + + "\x11GetStatusResponse\x126\n" + + "\x06status\x18\x01 \x01(\x0e2\x1e.jumpstarter.v1.ExporterStatusR\x06status\x12\x1d\n" + + "\amessage\x18\x02 \x01(\tH\x00R\amessage\x88\x01\x01B\n" + + "\n" + + "\b_message2\x92\a\n" + + "\x11ControllerService\x12M\n" + + "\bRegister\x12\x1f.jumpstarter.v1.RegisterRequest\x1a .jumpstarter.v1.RegisterResponse\x12S\n" + + "\n" + + "Unregister\x12!.jumpstarter.v1.UnregisterRequest\x1a\".jumpstarter.v1.UnregisterResponse\x12Y\n" + + "\fReportStatus\x12#.jumpstarter.v1.ReportStatusRequest\x1a$.jumpstarter.v1.ReportStatusResponse\x12I\n" + + "\x06Listen\x12\x1d.jumpstarter.v1.ListenRequest\x1a\x1e.jumpstarter.v1.ListenResponse0\x01\x12I\n" + + "\x06Status\x12\x1d.jumpstarter.v1.StatusRequest\x1a\x1e.jumpstarter.v1.StatusResponse0\x01\x12A\n" + + "\x04Dial\x12\x1b.jumpstarter.v1.DialRequest\x1a\x1c.jumpstarter.v1.DialResponse\x12K\n" + + "\vAuditStream\x12\".jumpstarter.v1.AuditStreamRequest\x1a\x16.google.protobuf.Empty(\x01\x12M\n" + + 
"\bGetLease\x12\x1f.jumpstarter.v1.GetLeaseRequest\x1a .jumpstarter.v1.GetLeaseResponse\x12Y\n" + + "\fRequestLease\x12#.jumpstarter.v1.RequestLeaseRequest\x1a$.jumpstarter.v1.RequestLeaseResponse\x12Y\n" + + "\fReleaseLease\x12#.jumpstarter.v1.ReleaseLeaseRequest\x1a$.jumpstarter.v1.ReleaseLeaseResponse\x12S\n" + + "\n" + + "ListLeases\x12!.jumpstarter.v1.ListLeasesRequest\x1a\".jumpstarter.v1.ListLeasesResponse2\x82\x04\n" + + "\x0fExporterService\x12F\n" + + "\tGetReport\x12\x16.google.protobuf.Empty\x1a!.jumpstarter.v1.GetReportResponse\x12S\n" + + "\n" + + "DriverCall\x12!.jumpstarter.v1.DriverCallRequest\x1a\".jumpstarter.v1.DriverCallResponse\x12p\n" + + "\x13StreamingDriverCall\x12*.jumpstarter.v1.StreamingDriverCallRequest\x1a+.jumpstarter.v1.StreamingDriverCallResponse0\x01\x12H\n" + + "\tLogStream\x12\x16.google.protobuf.Empty\x1a!.jumpstarter.v1.LogStreamResponse0\x01\x12D\n" + + "\x05Reset\x12\x1c.jumpstarter.v1.ResetRequest\x1a\x1d.jumpstarter.v1.ResetResponse\x12P\n" + + "\tGetStatus\x12 .jumpstarter.v1.GetStatusRequest\x1a!.jumpstarter.v1.GetStatusResponseB\xe1\x01\n" + + "\x12com.jumpstarter.v1B\x10JumpstarterProtoP\x01Z`github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/v1;jumpstarterv1\xa2\x02\x03JXX\xaa\x02\x0eJumpstarter.V1\xca\x02\x0eJumpstarter\\V1\xe2\x02\x1aJumpstarter\\V1\\GPBMetadata\xea\x02\x0fJumpstarter::V1b\x06proto3" + +var ( + file_jumpstarter_v1_jumpstarter_proto_rawDescOnce sync.Once + file_jumpstarter_v1_jumpstarter_proto_rawDescData []byte +) + +func file_jumpstarter_v1_jumpstarter_proto_rawDescGZIP() []byte { + file_jumpstarter_v1_jumpstarter_proto_rawDescOnce.Do(func() { + file_jumpstarter_v1_jumpstarter_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_jumpstarter_v1_jumpstarter_proto_rawDesc), len(file_jumpstarter_v1_jumpstarter_proto_rawDesc))) + }) + return file_jumpstarter_v1_jumpstarter_proto_rawDescData +} + +var file_jumpstarter_v1_jumpstarter_proto_msgTypes = 
make([]protoimpl.MessageInfo, 37) +var file_jumpstarter_v1_jumpstarter_proto_goTypes = []any{ + (*RegisterRequest)(nil), // 0: jumpstarter.v1.RegisterRequest + (*DriverInstanceReport)(nil), // 1: jumpstarter.v1.DriverInstanceReport + (*RegisterResponse)(nil), // 2: jumpstarter.v1.RegisterResponse + (*UnregisterRequest)(nil), // 3: jumpstarter.v1.UnregisterRequest + (*UnregisterResponse)(nil), // 4: jumpstarter.v1.UnregisterResponse + (*ListenRequest)(nil), // 5: jumpstarter.v1.ListenRequest + (*ListenResponse)(nil), // 6: jumpstarter.v1.ListenResponse + (*StatusRequest)(nil), // 7: jumpstarter.v1.StatusRequest + (*StatusResponse)(nil), // 8: jumpstarter.v1.StatusResponse + (*DialRequest)(nil), // 9: jumpstarter.v1.DialRequest + (*DialResponse)(nil), // 10: jumpstarter.v1.DialResponse + (*AuditStreamRequest)(nil), // 11: jumpstarter.v1.AuditStreamRequest + (*ReportStatusRequest)(nil), // 12: jumpstarter.v1.ReportStatusRequest + (*ReportStatusResponse)(nil), // 13: jumpstarter.v1.ReportStatusResponse + (*GetReportResponse)(nil), // 14: jumpstarter.v1.GetReportResponse + (*Endpoint)(nil), // 15: jumpstarter.v1.Endpoint + (*DriverCallRequest)(nil), // 16: jumpstarter.v1.DriverCallRequest + (*DriverCallResponse)(nil), // 17: jumpstarter.v1.DriverCallResponse + (*StreamingDriverCallRequest)(nil), // 18: jumpstarter.v1.StreamingDriverCallRequest + (*StreamingDriverCallResponse)(nil), // 19: jumpstarter.v1.StreamingDriverCallResponse + (*LogStreamResponse)(nil), // 20: jumpstarter.v1.LogStreamResponse + (*ResetRequest)(nil), // 21: jumpstarter.v1.ResetRequest + (*ResetResponse)(nil), // 22: jumpstarter.v1.ResetResponse + (*GetLeaseRequest)(nil), // 23: jumpstarter.v1.GetLeaseRequest + (*GetLeaseResponse)(nil), // 24: jumpstarter.v1.GetLeaseResponse + (*RequestLeaseRequest)(nil), // 25: jumpstarter.v1.RequestLeaseRequest + (*RequestLeaseResponse)(nil), // 26: jumpstarter.v1.RequestLeaseResponse + (*ReleaseLeaseRequest)(nil), // 27: jumpstarter.v1.ReleaseLeaseRequest + 
(*ReleaseLeaseResponse)(nil), // 28: jumpstarter.v1.ReleaseLeaseResponse + (*ListLeasesRequest)(nil), // 29: jumpstarter.v1.ListLeasesRequest + (*ListLeasesResponse)(nil), // 30: jumpstarter.v1.ListLeasesResponse + (*GetStatusRequest)(nil), // 31: jumpstarter.v1.GetStatusRequest + (*GetStatusResponse)(nil), // 32: jumpstarter.v1.GetStatusResponse + nil, // 33: jumpstarter.v1.RegisterRequest.LabelsEntry + nil, // 34: jumpstarter.v1.DriverInstanceReport.LabelsEntry + nil, // 35: jumpstarter.v1.DriverInstanceReport.MethodsDescriptionEntry + nil, // 36: jumpstarter.v1.GetReportResponse.LabelsEntry + (ExporterStatus)(0), // 37: jumpstarter.v1.ExporterStatus + (*structpb.Value)(nil), // 38: google.protobuf.Value + (LogSource)(0), // 39: jumpstarter.v1.LogSource + (*durationpb.Duration)(nil), // 40: google.protobuf.Duration + (*LabelSelector)(nil), // 41: jumpstarter.v1.LabelSelector + (*timestamppb.Timestamp)(nil), // 42: google.protobuf.Timestamp + (*Condition)(nil), // 43: jumpstarter.v1.Condition + (*emptypb.Empty)(nil), // 44: google.protobuf.Empty +} +var file_jumpstarter_v1_jumpstarter_proto_depIdxs = []int32{ + 33, // 0: jumpstarter.v1.RegisterRequest.labels:type_name -> jumpstarter.v1.RegisterRequest.LabelsEntry + 1, // 1: jumpstarter.v1.RegisterRequest.reports:type_name -> jumpstarter.v1.DriverInstanceReport + 34, // 2: jumpstarter.v1.DriverInstanceReport.labels:type_name -> jumpstarter.v1.DriverInstanceReport.LabelsEntry + 35, // 3: jumpstarter.v1.DriverInstanceReport.methods_description:type_name -> jumpstarter.v1.DriverInstanceReport.MethodsDescriptionEntry + 37, // 4: jumpstarter.v1.ReportStatusRequest.status:type_name -> jumpstarter.v1.ExporterStatus + 36, // 5: jumpstarter.v1.GetReportResponse.labels:type_name -> jumpstarter.v1.GetReportResponse.LabelsEntry + 1, // 6: jumpstarter.v1.GetReportResponse.reports:type_name -> jumpstarter.v1.DriverInstanceReport + 15, // 7: jumpstarter.v1.GetReportResponse.alternative_endpoints:type_name -> 
jumpstarter.v1.Endpoint + 38, // 8: jumpstarter.v1.DriverCallRequest.args:type_name -> google.protobuf.Value + 38, // 9: jumpstarter.v1.DriverCallResponse.result:type_name -> google.protobuf.Value + 38, // 10: jumpstarter.v1.StreamingDriverCallRequest.args:type_name -> google.protobuf.Value + 38, // 11: jumpstarter.v1.StreamingDriverCallResponse.result:type_name -> google.protobuf.Value + 39, // 12: jumpstarter.v1.LogStreamResponse.source:type_name -> jumpstarter.v1.LogSource + 40, // 13: jumpstarter.v1.GetLeaseResponse.duration:type_name -> google.protobuf.Duration + 41, // 14: jumpstarter.v1.GetLeaseResponse.selector:type_name -> jumpstarter.v1.LabelSelector + 42, // 15: jumpstarter.v1.GetLeaseResponse.begin_time:type_name -> google.protobuf.Timestamp + 42, // 16: jumpstarter.v1.GetLeaseResponse.end_time:type_name -> google.protobuf.Timestamp + 43, // 17: jumpstarter.v1.GetLeaseResponse.conditions:type_name -> jumpstarter.v1.Condition + 40, // 18: jumpstarter.v1.RequestLeaseRequest.duration:type_name -> google.protobuf.Duration + 41, // 19: jumpstarter.v1.RequestLeaseRequest.selector:type_name -> jumpstarter.v1.LabelSelector + 37, // 20: jumpstarter.v1.GetStatusResponse.status:type_name -> jumpstarter.v1.ExporterStatus + 0, // 21: jumpstarter.v1.ControllerService.Register:input_type -> jumpstarter.v1.RegisterRequest + 3, // 22: jumpstarter.v1.ControllerService.Unregister:input_type -> jumpstarter.v1.UnregisterRequest + 12, // 23: jumpstarter.v1.ControllerService.ReportStatus:input_type -> jumpstarter.v1.ReportStatusRequest + 5, // 24: jumpstarter.v1.ControllerService.Listen:input_type -> jumpstarter.v1.ListenRequest + 7, // 25: jumpstarter.v1.ControllerService.Status:input_type -> jumpstarter.v1.StatusRequest + 9, // 26: jumpstarter.v1.ControllerService.Dial:input_type -> jumpstarter.v1.DialRequest + 11, // 27: jumpstarter.v1.ControllerService.AuditStream:input_type -> jumpstarter.v1.AuditStreamRequest + 23, // 28: 
jumpstarter.v1.ControllerService.GetLease:input_type -> jumpstarter.v1.GetLeaseRequest + 25, // 29: jumpstarter.v1.ControllerService.RequestLease:input_type -> jumpstarter.v1.RequestLeaseRequest + 27, // 30: jumpstarter.v1.ControllerService.ReleaseLease:input_type -> jumpstarter.v1.ReleaseLeaseRequest + 29, // 31: jumpstarter.v1.ControllerService.ListLeases:input_type -> jumpstarter.v1.ListLeasesRequest + 44, // 32: jumpstarter.v1.ExporterService.GetReport:input_type -> google.protobuf.Empty + 16, // 33: jumpstarter.v1.ExporterService.DriverCall:input_type -> jumpstarter.v1.DriverCallRequest + 18, // 34: jumpstarter.v1.ExporterService.StreamingDriverCall:input_type -> jumpstarter.v1.StreamingDriverCallRequest + 44, // 35: jumpstarter.v1.ExporterService.LogStream:input_type -> google.protobuf.Empty + 21, // 36: jumpstarter.v1.ExporterService.Reset:input_type -> jumpstarter.v1.ResetRequest + 31, // 37: jumpstarter.v1.ExporterService.GetStatus:input_type -> jumpstarter.v1.GetStatusRequest + 2, // 38: jumpstarter.v1.ControllerService.Register:output_type -> jumpstarter.v1.RegisterResponse + 4, // 39: jumpstarter.v1.ControllerService.Unregister:output_type -> jumpstarter.v1.UnregisterResponse + 13, // 40: jumpstarter.v1.ControllerService.ReportStatus:output_type -> jumpstarter.v1.ReportStatusResponse + 6, // 41: jumpstarter.v1.ControllerService.Listen:output_type -> jumpstarter.v1.ListenResponse + 8, // 42: jumpstarter.v1.ControllerService.Status:output_type -> jumpstarter.v1.StatusResponse + 10, // 43: jumpstarter.v1.ControllerService.Dial:output_type -> jumpstarter.v1.DialResponse + 44, // 44: jumpstarter.v1.ControllerService.AuditStream:output_type -> google.protobuf.Empty + 24, // 45: jumpstarter.v1.ControllerService.GetLease:output_type -> jumpstarter.v1.GetLeaseResponse + 26, // 46: jumpstarter.v1.ControllerService.RequestLease:output_type -> jumpstarter.v1.RequestLeaseResponse + 28, // 47: jumpstarter.v1.ControllerService.ReleaseLease:output_type -> 
jumpstarter.v1.ReleaseLeaseResponse + 30, // 48: jumpstarter.v1.ControllerService.ListLeases:output_type -> jumpstarter.v1.ListLeasesResponse + 14, // 49: jumpstarter.v1.ExporterService.GetReport:output_type -> jumpstarter.v1.GetReportResponse + 17, // 50: jumpstarter.v1.ExporterService.DriverCall:output_type -> jumpstarter.v1.DriverCallResponse + 19, // 51: jumpstarter.v1.ExporterService.StreamingDriverCall:output_type -> jumpstarter.v1.StreamingDriverCallResponse + 20, // 52: jumpstarter.v1.ExporterService.LogStream:output_type -> jumpstarter.v1.LogStreamResponse + 22, // 53: jumpstarter.v1.ExporterService.Reset:output_type -> jumpstarter.v1.ResetResponse + 32, // 54: jumpstarter.v1.ExporterService.GetStatus:output_type -> jumpstarter.v1.GetStatusResponse + 38, // [38:55] is the sub-list for method output_type + 21, // [21:38] is the sub-list for method input_type + 21, // [21:21] is the sub-list for extension type_name + 21, // [21:21] is the sub-list for extension extendee + 0, // [0:21] is the sub-list for field type_name +} + +func init() { file_jumpstarter_v1_jumpstarter_proto_init() } +func file_jumpstarter_v1_jumpstarter_proto_init() { + if File_jumpstarter_v1_jumpstarter_proto != nil { + return + } + file_jumpstarter_v1_kubernetes_proto_init() + file_jumpstarter_v1_common_proto_init() + file_jumpstarter_v1_jumpstarter_proto_msgTypes[1].OneofWrappers = []any{} + file_jumpstarter_v1_jumpstarter_proto_msgTypes[8].OneofWrappers = []any{} + file_jumpstarter_v1_jumpstarter_proto_msgTypes[12].OneofWrappers = []any{} + file_jumpstarter_v1_jumpstarter_proto_msgTypes[20].OneofWrappers = []any{} + file_jumpstarter_v1_jumpstarter_proto_msgTypes[24].OneofWrappers = []any{} + file_jumpstarter_v1_jumpstarter_proto_msgTypes[32].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_jumpstarter_v1_jumpstarter_proto_rawDesc), len(file_jumpstarter_v1_jumpstarter_proto_rawDesc)), + NumEnums: 0, + NumMessages: 37, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_jumpstarter_v1_jumpstarter_proto_goTypes, + DependencyIndexes: file_jumpstarter_v1_jumpstarter_proto_depIdxs, + MessageInfos: file_jumpstarter_v1_jumpstarter_proto_msgTypes, + }.Build() + File_jumpstarter_v1_jumpstarter_proto = out.File + file_jumpstarter_v1_jumpstarter_proto_goTypes = nil + file_jumpstarter_v1_jumpstarter_proto_depIdxs = nil +} diff --git a/controller/internal/protocol/jumpstarter/v1/jumpstarter_grpc.pb.go b/controller/internal/protocol/jumpstarter/v1/jumpstarter_grpc.pb.go new file mode 100644 index 000000000..d0fbd1bd1 --- /dev/null +++ b/controller/internal/protocol/jumpstarter/v1/jumpstarter_grpc.pb.go @@ -0,0 +1,855 @@ +// Copyright 2024 The Jumpstarter Authors + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: jumpstarter/v1/jumpstarter.proto + +package jumpstarterv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + ControllerService_Register_FullMethodName = "/jumpstarter.v1.ControllerService/Register" + ControllerService_Unregister_FullMethodName = "/jumpstarter.v1.ControllerService/Unregister" + ControllerService_ReportStatus_FullMethodName = "/jumpstarter.v1.ControllerService/ReportStatus" + ControllerService_Listen_FullMethodName = "/jumpstarter.v1.ControllerService/Listen" + ControllerService_Status_FullMethodName = "/jumpstarter.v1.ControllerService/Status" + ControllerService_Dial_FullMethodName = "/jumpstarter.v1.ControllerService/Dial" + ControllerService_AuditStream_FullMethodName = "/jumpstarter.v1.ControllerService/AuditStream" + ControllerService_GetLease_FullMethodName = "/jumpstarter.v1.ControllerService/GetLease" + ControllerService_RequestLease_FullMethodName = "/jumpstarter.v1.ControllerService/RequestLease" + ControllerService_ReleaseLease_FullMethodName = "/jumpstarter.v1.ControllerService/ReleaseLease" + ControllerService_ListLeases_FullMethodName = "/jumpstarter.v1.ControllerService/ListLeases" +) + +// ControllerServiceClient is the client API for ControllerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+// +// A service where a exporter can connect to make itself available +type ControllerServiceClient interface { + // Exporter registration + Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*RegisterResponse, error) + // Exporter disconnection + // Disconnecting with bye will invalidate any existing router tokens + // we will eventually have a mechanism to tell the router this token + // has been invalidated + Unregister(ctx context.Context, in *UnregisterRequest, opts ...grpc.CallOption) (*UnregisterResponse, error) + // Exporter status report + // Allows exporters to report their own status to the controller + ReportStatus(ctx context.Context, in *ReportStatusRequest, opts ...grpc.CallOption) (*ReportStatusResponse, error) + // Exporter listening + // Returns stream tokens for accepting incoming client connections + Listen(ctx context.Context, in *ListenRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ListenResponse], error) + // Exporter status + // Returns lease status for the exporter + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error) + // Client connecting + // Returns stream token for connecting to the desired exporter + // Leases are checked before token issuance + Dial(ctx context.Context, in *DialRequest, opts ...grpc.CallOption) (*DialResponse, error) + // Audit events from the exporters + // audit events are used to track the exporter's activity + AuditStream(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[AuditStreamRequest, emptypb.Empty], error) + // Get Lease + GetLease(ctx context.Context, in *GetLeaseRequest, opts ...grpc.CallOption) (*GetLeaseResponse, error) + // Request Lease + RequestLease(ctx context.Context, in *RequestLeaseRequest, opts ...grpc.CallOption) (*RequestLeaseResponse, error) + // Release Lease + ReleaseLease(ctx context.Context, in *ReleaseLeaseRequest, opts ...grpc.CallOption) 
(*ReleaseLeaseResponse, error) + // List Leases + ListLeases(ctx context.Context, in *ListLeasesRequest, opts ...grpc.CallOption) (*ListLeasesResponse, error) +} + +type controllerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewControllerServiceClient(cc grpc.ClientConnInterface) ControllerServiceClient { + return &controllerServiceClient{cc} +} + +func (c *controllerServiceClient) Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*RegisterResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RegisterResponse) + err := c.cc.Invoke(ctx, ControllerService_Register_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerServiceClient) Unregister(ctx context.Context, in *UnregisterRequest, opts ...grpc.CallOption) (*UnregisterResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UnregisterResponse) + err := c.cc.Invoke(ctx, ControllerService_Unregister_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerServiceClient) ReportStatus(ctx context.Context, in *ReportStatusRequest, opts ...grpc.CallOption) (*ReportStatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReportStatusResponse) + err := c.cc.Invoke(ctx, ControllerService_ReportStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerServiceClient) Listen(ctx context.Context, in *ListenRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ListenResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ControllerService_ServiceDesc.Streams[0], ControllerService_Listen_FullMethodName, cOpts...) 
+ if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[ListenRequest, ListenResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ControllerService_ListenClient = grpc.ServerStreamingClient[ListenResponse] + +func (c *controllerServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ControllerService_ServiceDesc.Streams[1], ControllerService_Status_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[StatusRequest, StatusResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ControllerService_StatusClient = grpc.ServerStreamingClient[StatusResponse] + +func (c *controllerServiceClient) Dial(ctx context.Context, in *DialRequest, opts ...grpc.CallOption) (*DialResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(DialResponse) + err := c.cc.Invoke(ctx, ControllerService_Dial_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerServiceClient) AuditStream(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[AuditStreamRequest, emptypb.Empty], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ stream, err := c.cc.NewStream(ctx, &ControllerService_ServiceDesc.Streams[2], ControllerService_AuditStream_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[AuditStreamRequest, emptypb.Empty]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ControllerService_AuditStreamClient = grpc.ClientStreamingClient[AuditStreamRequest, emptypb.Empty] + +func (c *controllerServiceClient) GetLease(ctx context.Context, in *GetLeaseRequest, opts ...grpc.CallOption) (*GetLeaseResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetLeaseResponse) + err := c.cc.Invoke(ctx, ControllerService_GetLease_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerServiceClient) RequestLease(ctx context.Context, in *RequestLeaseRequest, opts ...grpc.CallOption) (*RequestLeaseResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RequestLeaseResponse) + err := c.cc.Invoke(ctx, ControllerService_RequestLease_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerServiceClient) ReleaseLease(ctx context.Context, in *ReleaseLeaseRequest, opts ...grpc.CallOption) (*ReleaseLeaseResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReleaseLeaseResponse) + err := c.cc.Invoke(ctx, ControllerService_ReleaseLease_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerServiceClient) ListLeases(ctx context.Context, in *ListLeasesRequest, opts ...grpc.CallOption) (*ListLeasesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(ListLeasesResponse) + err := c.cc.Invoke(ctx, ControllerService_ListLeases_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ControllerServiceServer is the server API for ControllerService service. +// All implementations must embed UnimplementedControllerServiceServer +// for forward compatibility. +// +// A service where a exporter can connect to make itself available +type ControllerServiceServer interface { + // Exporter registration + Register(context.Context, *RegisterRequest) (*RegisterResponse, error) + // Exporter disconnection + // Disconnecting with bye will invalidate any existing router tokens + // we will eventually have a mechanism to tell the router this token + // has been invalidated + Unregister(context.Context, *UnregisterRequest) (*UnregisterResponse, error) + // Exporter status report + // Allows exporters to report their own status to the controller + ReportStatus(context.Context, *ReportStatusRequest) (*ReportStatusResponse, error) + // Exporter listening + // Returns stream tokens for accepting incoming client connections + Listen(*ListenRequest, grpc.ServerStreamingServer[ListenResponse]) error + // Exporter status + // Returns lease status for the exporter + Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error + // Client connecting + // Returns stream token for connecting to the desired exporter + // Leases are checked before token issuance + Dial(context.Context, *DialRequest) (*DialResponse, error) + // Audit events from the exporters + // audit events are used to track the exporter's activity + AuditStream(grpc.ClientStreamingServer[AuditStreamRequest, emptypb.Empty]) error + // Get Lease + GetLease(context.Context, *GetLeaseRequest) (*GetLeaseResponse, error) + // Request Lease + RequestLease(context.Context, *RequestLeaseRequest) (*RequestLeaseResponse, error) + // Release Lease + ReleaseLease(context.Context, *ReleaseLeaseRequest) 
(*ReleaseLeaseResponse, error) + // List Leases + ListLeases(context.Context, *ListLeasesRequest) (*ListLeasesResponse, error) + mustEmbedUnimplementedControllerServiceServer() +} + +// UnimplementedControllerServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedControllerServiceServer struct{} + +func (UnimplementedControllerServiceServer) Register(context.Context, *RegisterRequest) (*RegisterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Register not implemented") +} +func (UnimplementedControllerServiceServer) Unregister(context.Context, *UnregisterRequest) (*UnregisterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Unregister not implemented") +} +func (UnimplementedControllerServiceServer) ReportStatus(context.Context, *ReportStatusRequest) (*ReportStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReportStatus not implemented") +} +func (UnimplementedControllerServiceServer) Listen(*ListenRequest, grpc.ServerStreamingServer[ListenResponse]) error { + return status.Errorf(codes.Unimplemented, "method Listen not implemented") +} +func (UnimplementedControllerServiceServer) Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error { + return status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (UnimplementedControllerServiceServer) Dial(context.Context, *DialRequest) (*DialResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Dial not implemented") +} +func (UnimplementedControllerServiceServer) AuditStream(grpc.ClientStreamingServer[AuditStreamRequest, emptypb.Empty]) error { + return status.Errorf(codes.Unimplemented, "method AuditStream not implemented") +} +func (UnimplementedControllerServiceServer) GetLease(context.Context, 
*GetLeaseRequest) (*GetLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetLease not implemented") +} +func (UnimplementedControllerServiceServer) RequestLease(context.Context, *RequestLeaseRequest) (*RequestLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RequestLease not implemented") +} +func (UnimplementedControllerServiceServer) ReleaseLease(context.Context, *ReleaseLeaseRequest) (*ReleaseLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReleaseLease not implemented") +} +func (UnimplementedControllerServiceServer) ListLeases(context.Context, *ListLeasesRequest) (*ListLeasesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListLeases not implemented") +} +func (UnimplementedControllerServiceServer) mustEmbedUnimplementedControllerServiceServer() {} +func (UnimplementedControllerServiceServer) testEmbeddedByValue() {} + +// UnsafeControllerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ControllerServiceServer will +// result in compilation errors. +type UnsafeControllerServiceServer interface { + mustEmbedUnimplementedControllerServiceServer() +} + +func RegisterControllerServiceServer(s grpc.ServiceRegistrar, srv ControllerServiceServer) { + // If the following call panics, it indicates UnimplementedControllerServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&ControllerService_ServiceDesc, srv) +} + +func _ControllerService_Register_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegisterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServiceServer).Register(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControllerService_Register_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServiceServer).Register(ctx, req.(*RegisterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ControllerService_Unregister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnregisterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServiceServer).Unregister(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControllerService_Unregister_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServiceServer).Unregister(ctx, req.(*UnregisterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ControllerService_ReportStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReportStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServiceServer).ReportStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControllerService_ReportStatus_FullMethodName, + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServiceServer).ReportStatus(ctx, req.(*ReportStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ControllerService_Listen_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListenRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ControllerServiceServer).Listen(m, &grpc.GenericServerStream[ListenRequest, ListenResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ControllerService_ListenServer = grpc.ServerStreamingServer[ListenResponse] + +func _ControllerService_Status_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StatusRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ControllerServiceServer).Status(m, &grpc.GenericServerStream[StatusRequest, StatusResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type ControllerService_StatusServer = grpc.ServerStreamingServer[StatusResponse] + +func _ControllerService_Dial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DialRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServiceServer).Dial(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControllerService_Dial_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServiceServer).Dial(ctx, req.(*DialRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ControllerService_AuditStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ControllerServiceServer).AuditStream(&grpc.GenericServerStream[AuditStreamRequest, emptypb.Empty]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type ControllerService_AuditStreamServer = grpc.ClientStreamingServer[AuditStreamRequest, emptypb.Empty] + +func _ControllerService_GetLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetLeaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServiceServer).GetLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControllerService_GetLease_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServiceServer).GetLease(ctx, req.(*GetLeaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ControllerService_RequestLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestLeaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServiceServer).RequestLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControllerService_RequestLease_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServiceServer).RequestLease(ctx, req.(*RequestLeaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ControllerService_ReleaseLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReleaseLeaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServiceServer).ReleaseLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControllerService_ReleaseLease_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { + return srv.(ControllerServiceServer).ReleaseLease(ctx, req.(*ReleaseLeaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ControllerService_ListLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListLeasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServiceServer).ListLeases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControllerService_ListLeases_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServiceServer).ListLeases(ctx, req.(*ListLeasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ControllerService_ServiceDesc is the grpc.ServiceDesc for ControllerService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ControllerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "jumpstarter.v1.ControllerService", + HandlerType: (*ControllerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Register", + Handler: _ControllerService_Register_Handler, + }, + { + MethodName: "Unregister", + Handler: _ControllerService_Unregister_Handler, + }, + { + MethodName: "ReportStatus", + Handler: _ControllerService_ReportStatus_Handler, + }, + { + MethodName: "Dial", + Handler: _ControllerService_Dial_Handler, + }, + { + MethodName: "GetLease", + Handler: _ControllerService_GetLease_Handler, + }, + { + MethodName: "RequestLease", + Handler: _ControllerService_RequestLease_Handler, + }, + { + MethodName: "ReleaseLease", + Handler: _ControllerService_ReleaseLease_Handler, + }, + { + MethodName: "ListLeases", + Handler: _ControllerService_ListLeases_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Listen", + Handler: 
_ControllerService_Listen_Handler, + ServerStreams: true, + }, + { + StreamName: "Status", + Handler: _ControllerService_Status_Handler, + ServerStreams: true, + }, + { + StreamName: "AuditStream", + Handler: _ControllerService_AuditStream_Handler, + ClientStreams: true, + }, + }, + Metadata: "jumpstarter/v1/jumpstarter.proto", +} + +const ( + ExporterService_GetReport_FullMethodName = "/jumpstarter.v1.ExporterService/GetReport" + ExporterService_DriverCall_FullMethodName = "/jumpstarter.v1.ExporterService/DriverCall" + ExporterService_StreamingDriverCall_FullMethodName = "/jumpstarter.v1.ExporterService/StreamingDriverCall" + ExporterService_LogStream_FullMethodName = "/jumpstarter.v1.ExporterService/LogStream" + ExporterService_Reset_FullMethodName = "/jumpstarter.v1.ExporterService/Reset" + ExporterService_GetStatus_FullMethodName = "/jumpstarter.v1.ExporterService/GetStatus" +) + +// ExporterServiceClient is the client API for ExporterService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+// +// A service a exporter can share locally to be used without a server +// Channel/Call credentials are used to authenticate the client, and routing to the right exporter +type ExporterServiceClient interface { + // Exporter registration + GetReport(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetReportResponse, error) + DriverCall(ctx context.Context, in *DriverCallRequest, opts ...grpc.CallOption) (*DriverCallResponse, error) + StreamingDriverCall(ctx context.Context, in *StreamingDriverCallRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamingDriverCallResponse], error) + LogStream(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (grpc.ServerStreamingClient[LogStreamResponse], error) + Reset(ctx context.Context, in *ResetRequest, opts ...grpc.CallOption) (*ResetResponse, error) + GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) +} + +type exporterServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewExporterServiceClient(cc grpc.ClientConnInterface) ExporterServiceClient { + return &exporterServiceClient{cc} +} + +func (c *exporterServiceClient) GetReport(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetReportResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetReportResponse) + err := c.cc.Invoke(ctx, ExporterService_GetReport_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *exporterServiceClient) DriverCall(ctx context.Context, in *DriverCallRequest, opts ...grpc.CallOption) (*DriverCallResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(DriverCallResponse) + err := c.cc.Invoke(ctx, ExporterService_DriverCall_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *exporterServiceClient) StreamingDriverCall(ctx context.Context, in *StreamingDriverCallRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamingDriverCallResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ExporterService_ServiceDesc.Streams[0], ExporterService_StreamingDriverCall_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[StreamingDriverCallRequest, StreamingDriverCallResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ExporterService_StreamingDriverCallClient = grpc.ServerStreamingClient[StreamingDriverCallResponse] + +func (c *exporterServiceClient) LogStream(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (grpc.ServerStreamingClient[LogStreamResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ExporterService_ServiceDesc.Streams[1], ExporterService_LogStream_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[emptypb.Empty, LogStreamResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type ExporterService_LogStreamClient = grpc.ServerStreamingClient[LogStreamResponse] + +func (c *exporterServiceClient) Reset(ctx context.Context, in *ResetRequest, opts ...grpc.CallOption) (*ResetResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ResetResponse) + err := c.cc.Invoke(ctx, ExporterService_Reset_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *exporterServiceClient) GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetStatusResponse) + err := c.cc.Invoke(ctx, ExporterService_GetStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ExporterServiceServer is the server API for ExporterService service. +// All implementations must embed UnimplementedExporterServiceServer +// for forward compatibility. +// +// A service a exporter can share locally to be used without a server +// Channel/Call credentials are used to authenticate the client, and routing to the right exporter +type ExporterServiceServer interface { + // Exporter registration + GetReport(context.Context, *emptypb.Empty) (*GetReportResponse, error) + DriverCall(context.Context, *DriverCallRequest) (*DriverCallResponse, error) + StreamingDriverCall(*StreamingDriverCallRequest, grpc.ServerStreamingServer[StreamingDriverCallResponse]) error + LogStream(*emptypb.Empty, grpc.ServerStreamingServer[LogStreamResponse]) error + Reset(context.Context, *ResetRequest) (*ResetResponse, error) + GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) + mustEmbedUnimplementedExporterServiceServer() +} + +// UnimplementedExporterServiceServer must be embedded to have +// forward compatible implementations. 
+// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedExporterServiceServer struct{} + +func (UnimplementedExporterServiceServer) GetReport(context.Context, *emptypb.Empty) (*GetReportResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetReport not implemented") +} +func (UnimplementedExporterServiceServer) DriverCall(context.Context, *DriverCallRequest) (*DriverCallResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DriverCall not implemented") +} +func (UnimplementedExporterServiceServer) StreamingDriverCall(*StreamingDriverCallRequest, grpc.ServerStreamingServer[StreamingDriverCallResponse]) error { + return status.Errorf(codes.Unimplemented, "method StreamingDriverCall not implemented") +} +func (UnimplementedExporterServiceServer) LogStream(*emptypb.Empty, grpc.ServerStreamingServer[LogStreamResponse]) error { + return status.Errorf(codes.Unimplemented, "method LogStream not implemented") +} +func (UnimplementedExporterServiceServer) Reset(context.Context, *ResetRequest) (*ResetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Reset not implemented") +} +func (UnimplementedExporterServiceServer) GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetStatus not implemented") +} +func (UnimplementedExporterServiceServer) mustEmbedUnimplementedExporterServiceServer() {} +func (UnimplementedExporterServiceServer) testEmbeddedByValue() {} + +// UnsafeExporterServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ExporterServiceServer will +// result in compilation errors. 
+type UnsafeExporterServiceServer interface { + mustEmbedUnimplementedExporterServiceServer() +} + +func RegisterExporterServiceServer(s grpc.ServiceRegistrar, srv ExporterServiceServer) { + // If the following call pancis, it indicates UnimplementedExporterServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&ExporterService_ServiceDesc, srv) +} + +func _ExporterService_GetReport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExporterServiceServer).GetReport(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ExporterService_GetReport_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExporterServiceServer).GetReport(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ExporterService_DriverCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DriverCallRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExporterServiceServer).DriverCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ExporterService_DriverCall_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExporterServiceServer).DriverCall(ctx, req.(*DriverCallRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ExporterService_StreamingDriverCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamingDriverCallRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ExporterServiceServer).StreamingDriverCall(m, &grpc.GenericServerStream[StreamingDriverCallRequest, StreamingDriverCallResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ExporterService_StreamingDriverCallServer = grpc.ServerStreamingServer[StreamingDriverCallResponse] + +func _ExporterService_LogStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(emptypb.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ExporterServiceServer).LogStream(m, &grpc.GenericServerStream[emptypb.Empty, LogStreamResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type ExporterService_LogStreamServer = grpc.ServerStreamingServer[LogStreamResponse] + +func _ExporterService_Reset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExporterServiceServer).Reset(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ExporterService_Reset_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExporterServiceServer).Reset(ctx, req.(*ResetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ExporterService_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExporterServiceServer).GetStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ExporterService_GetStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExporterServiceServer).GetStatus(ctx, req.(*GetStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ExporterService_ServiceDesc is the grpc.ServiceDesc for ExporterService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ExporterService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "jumpstarter.v1.ExporterService", + HandlerType: (*ExporterServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetReport", + Handler: _ExporterService_GetReport_Handler, + }, + { + MethodName: "DriverCall", + Handler: _ExporterService_DriverCall_Handler, + }, + { + MethodName: "Reset", + Handler: _ExporterService_Reset_Handler, + }, + { + MethodName: "GetStatus", + Handler: _ExporterService_GetStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingDriverCall", + Handler: _ExporterService_StreamingDriverCall_Handler, + ServerStreams: true, + }, + { + StreamName: "LogStream", + Handler: _ExporterService_LogStream_Handler, + ServerStreams: true, + }, + }, + Metadata: "jumpstarter/v1/jumpstarter.proto", +} diff --git a/controller/internal/protocol/jumpstarter/v1/kubernetes.pb.go b/controller/internal/protocol/jumpstarter/v1/kubernetes.pb.go new file mode 100644 index 000000000..dac8801a2 --- /dev/null +++ b/controller/internal/protocol/jumpstarter/v1/kubernetes.pb.go @@ -0,0 +1,368 @@ +// Copyright 2024 The Jumpstarter Authors + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: jumpstarter/v1/kubernetes.proto + +package jumpstarterv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type LabelSelectorRequirement struct { + state protoimpl.MessageState `protogen:"open.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Operator string `protobuf:"bytes,2,opt,name=operator,proto3" json:"operator,omitempty"` + Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LabelSelectorRequirement) Reset() { + *x = LabelSelectorRequirement{} + mi := &file_jumpstarter_v1_kubernetes_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LabelSelectorRequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelSelectorRequirement) ProtoMessage() {} + +func (x *LabelSelectorRequirement) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_kubernetes_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelSelectorRequirement.ProtoReflect.Descriptor instead. 
+func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_kubernetes_proto_rawDescGZIP(), []int{0} +} + +func (x *LabelSelectorRequirement) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *LabelSelectorRequirement) GetOperator() string { + if x != nil { + return x.Operator + } + return "" +} + +func (x *LabelSelectorRequirement) GetValues() []string { + if x != nil { + return x.Values + } + return nil +} + +// Reference: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/label-selector/ +type LabelSelector struct { + state protoimpl.MessageState `protogen:"open.v1"` + MatchExpressions []*LabelSelectorRequirement `protobuf:"bytes,1,rep,name=match_expressions,json=matchExpressions,proto3" json:"match_expressions,omitempty"` + MatchLabels map[string]string `protobuf:"bytes,2,rep,name=match_labels,json=matchLabels,proto3" json:"match_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LabelSelector) Reset() { + *x = LabelSelector{} + mi := &file_jumpstarter_v1_kubernetes_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LabelSelector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelSelector) ProtoMessage() {} + +func (x *LabelSelector) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_kubernetes_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelSelector.ProtoReflect.Descriptor instead. 
+func (*LabelSelector) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_kubernetes_proto_rawDescGZIP(), []int{1} +} + +func (x *LabelSelector) GetMatchExpressions() []*LabelSelectorRequirement { + if x != nil { + return x.MatchExpressions + } + return nil +} + +func (x *LabelSelector) GetMatchLabels() map[string]string { + if x != nil { + return x.MatchLabels + } + return nil +} + +// Reference: https://github.com/kubernetes/kubernetes/blob/v1.31.1/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +type Time struct { + state protoimpl.MessageState `protogen:"open.v1"` + Seconds *int64 `protobuf:"varint,1,opt,name=seconds,proto3,oneof" json:"seconds,omitempty"` + Nanos *int32 `protobuf:"varint,2,opt,name=nanos,proto3,oneof" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Time) Reset() { + *x = Time{} + mi := &file_jumpstarter_v1_kubernetes_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Time) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Time) ProtoMessage() {} + +func (x *Time) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_kubernetes_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Time.ProtoReflect.Descriptor instead. 
+func (*Time) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_kubernetes_proto_rawDescGZIP(), []int{2} +} + +func (x *Time) GetSeconds() int64 { + if x != nil && x.Seconds != nil { + return *x.Seconds + } + return 0 +} + +func (x *Time) GetNanos() int32 { + if x != nil && x.Nanos != nil { + return *x.Nanos + } + return 0 +} + +type Condition struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type *string `protobuf:"bytes,1,opt,name=type,proto3,oneof" json:"type,omitempty"` + Status *string `protobuf:"bytes,2,opt,name=status,proto3,oneof" json:"status,omitempty"` + ObservedGeneration *int64 `protobuf:"varint,3,opt,name=observedGeneration,proto3,oneof" json:"observedGeneration,omitempty"` + LastTransitionTime *Time `protobuf:"bytes,4,opt,name=lastTransitionTime,proto3,oneof" json:"lastTransitionTime,omitempty"` + Reason *string `protobuf:"bytes,5,opt,name=reason,proto3,oneof" json:"reason,omitempty"` + Message *string `protobuf:"bytes,6,opt,name=message,proto3,oneof" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Condition) Reset() { + *x = Condition{} + mi := &file_jumpstarter_v1_kubernetes_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Condition) ProtoMessage() {} + +func (x *Condition) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_kubernetes_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Condition.ProtoReflect.Descriptor instead. 
+func (*Condition) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_kubernetes_proto_rawDescGZIP(), []int{3} +} + +func (x *Condition) GetType() string { + if x != nil && x.Type != nil { + return *x.Type + } + return "" +} + +func (x *Condition) GetStatus() string { + if x != nil && x.Status != nil { + return *x.Status + } + return "" +} + +func (x *Condition) GetObservedGeneration() int64 { + if x != nil && x.ObservedGeneration != nil { + return *x.ObservedGeneration + } + return 0 +} + +func (x *Condition) GetLastTransitionTime() *Time { + if x != nil { + return x.LastTransitionTime + } + return nil +} + +func (x *Condition) GetReason() string { + if x != nil && x.Reason != nil { + return *x.Reason + } + return "" +} + +func (x *Condition) GetMessage() string { + if x != nil && x.Message != nil { + return *x.Message + } + return "" +} + +var File_jumpstarter_v1_kubernetes_proto protoreflect.FileDescriptor + +const file_jumpstarter_v1_kubernetes_proto_rawDesc = "" + + "\n" + + "\x1fjumpstarter/v1/kubernetes.proto\x12\x0ejumpstarter.v1\"`\n" + + "\x18LabelSelectorRequirement\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x1a\n" + + "\boperator\x18\x02 \x01(\tR\boperator\x12\x16\n" + + "\x06values\x18\x03 \x03(\tR\x06values\"\xf9\x01\n" + + "\rLabelSelector\x12U\n" + + "\x11match_expressions\x18\x01 \x03(\v2(.jumpstarter.v1.LabelSelectorRequirementR\x10matchExpressions\x12Q\n" + + "\fmatch_labels\x18\x02 \x03(\v2..jumpstarter.v1.LabelSelector.MatchLabelsEntryR\vmatchLabels\x1a>\n" + + "\x10MatchLabelsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"V\n" + + "\x04Time\x12\x1d\n" + + "\aseconds\x18\x01 \x01(\x03H\x00R\aseconds\x88\x01\x01\x12\x19\n" + + "\x05nanos\x18\x02 \x01(\x05H\x01R\x05nanos\x88\x01\x01B\n" + + "\n" + + "\b_secondsB\b\n" + + "\x06_nanos\"\xd6\x02\n" + + "\tCondition\x12\x17\n" + + "\x04type\x18\x01 \x01(\tH\x00R\x04type\x88\x01\x01\x12\x1b\n" + + 
"\x06status\x18\x02 \x01(\tH\x01R\x06status\x88\x01\x01\x123\n" + + "\x12observedGeneration\x18\x03 \x01(\x03H\x02R\x12observedGeneration\x88\x01\x01\x12I\n" + + "\x12lastTransitionTime\x18\x04 \x01(\v2\x14.jumpstarter.v1.TimeH\x03R\x12lastTransitionTime\x88\x01\x01\x12\x1b\n" + + "\x06reason\x18\x05 \x01(\tH\x04R\x06reason\x88\x01\x01\x12\x1d\n" + + "\amessage\x18\x06 \x01(\tH\x05R\amessage\x88\x01\x01B\a\n" + + "\x05_typeB\t\n" + + "\a_statusB\x15\n" + + "\x13_observedGenerationB\x15\n" + + "\x13_lastTransitionTimeB\t\n" + + "\a_reasonB\n" + + "\n" + + "\b_messageB\xe0\x01\n" + + "\x12com.jumpstarter.v1B\x0fKubernetesProtoP\x01Z`github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/v1;jumpstarterv1\xa2\x02\x03JXX\xaa\x02\x0eJumpstarter.V1\xca\x02\x0eJumpstarter\\V1\xe2\x02\x1aJumpstarter\\V1\\GPBMetadata\xea\x02\x0fJumpstarter::V1b\x06proto3" + +var ( + file_jumpstarter_v1_kubernetes_proto_rawDescOnce sync.Once + file_jumpstarter_v1_kubernetes_proto_rawDescData []byte +) + +func file_jumpstarter_v1_kubernetes_proto_rawDescGZIP() []byte { + file_jumpstarter_v1_kubernetes_proto_rawDescOnce.Do(func() { + file_jumpstarter_v1_kubernetes_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_jumpstarter_v1_kubernetes_proto_rawDesc), len(file_jumpstarter_v1_kubernetes_proto_rawDesc))) + }) + return file_jumpstarter_v1_kubernetes_proto_rawDescData +} + +var file_jumpstarter_v1_kubernetes_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_jumpstarter_v1_kubernetes_proto_goTypes = []any{ + (*LabelSelectorRequirement)(nil), // 0: jumpstarter.v1.LabelSelectorRequirement + (*LabelSelector)(nil), // 1: jumpstarter.v1.LabelSelector + (*Time)(nil), // 2: jumpstarter.v1.Time + (*Condition)(nil), // 3: jumpstarter.v1.Condition + nil, // 4: jumpstarter.v1.LabelSelector.MatchLabelsEntry +} +var file_jumpstarter_v1_kubernetes_proto_depIdxs = []int32{ + 0, // 0: jumpstarter.v1.LabelSelector.match_expressions:type_name -> 
jumpstarter.v1.LabelSelectorRequirement + 4, // 1: jumpstarter.v1.LabelSelector.match_labels:type_name -> jumpstarter.v1.LabelSelector.MatchLabelsEntry + 2, // 2: jumpstarter.v1.Condition.lastTransitionTime:type_name -> jumpstarter.v1.Time + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_jumpstarter_v1_kubernetes_proto_init() } +func file_jumpstarter_v1_kubernetes_proto_init() { + if File_jumpstarter_v1_kubernetes_proto != nil { + return + } + file_jumpstarter_v1_kubernetes_proto_msgTypes[2].OneofWrappers = []any{} + file_jumpstarter_v1_kubernetes_proto_msgTypes[3].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_jumpstarter_v1_kubernetes_proto_rawDesc), len(file_jumpstarter_v1_kubernetes_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_jumpstarter_v1_kubernetes_proto_goTypes, + DependencyIndexes: file_jumpstarter_v1_kubernetes_proto_depIdxs, + MessageInfos: file_jumpstarter_v1_kubernetes_proto_msgTypes, + }.Build() + File_jumpstarter_v1_kubernetes_proto = out.File + file_jumpstarter_v1_kubernetes_proto_goTypes = nil + file_jumpstarter_v1_kubernetes_proto_depIdxs = nil +} diff --git a/controller/internal/protocol/jumpstarter/v1/router.pb.go b/controller/internal/protocol/jumpstarter/v1/router.pb.go new file mode 100644 index 000000000..49991bcc4 --- /dev/null +++ b/controller/internal/protocol/jumpstarter/v1/router.pb.go @@ -0,0 +1,258 @@ +// Copyright 2024 The Jumpstarter Authors + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: jumpstarter/v1/router.proto + +package jumpstarterv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FrameType int32 + +const ( + FrameType_FRAME_TYPE_DATA FrameType = 0 + FrameType_FRAME_TYPE_RST_STREAM FrameType = 3 + FrameType_FRAME_TYPE_PING FrameType = 6 + FrameType_FRAME_TYPE_GOAWAY FrameType = 7 +) + +// Enum value maps for FrameType. +var ( + FrameType_name = map[int32]string{ + 0: "FRAME_TYPE_DATA", + 3: "FRAME_TYPE_RST_STREAM", + 6: "FRAME_TYPE_PING", + 7: "FRAME_TYPE_GOAWAY", + } + FrameType_value = map[string]int32{ + "FRAME_TYPE_DATA": 0, + "FRAME_TYPE_RST_STREAM": 3, + "FRAME_TYPE_PING": 6, + "FRAME_TYPE_GOAWAY": 7, + } +) + +func (x FrameType) Enum() *FrameType { + p := new(FrameType) + *p = x + return p +} + +func (x FrameType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FrameType) Descriptor() protoreflect.EnumDescriptor { + return file_jumpstarter_v1_router_proto_enumTypes[0].Descriptor() +} + +func (FrameType) Type() protoreflect.EnumType { + return &file_jumpstarter_v1_router_proto_enumTypes[0] +} + +func (x FrameType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FrameType.Descriptor instead. 
+func (FrameType) EnumDescriptor() ([]byte, []int) { + return file_jumpstarter_v1_router_proto_rawDescGZIP(), []int{0} +} + +type StreamRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + FrameType FrameType `protobuf:"varint,2,opt,name=frame_type,json=frameType,proto3,enum=jumpstarter.v1.FrameType" json:"frame_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamRequest) Reset() { + *x = StreamRequest{} + mi := &file_jumpstarter_v1_router_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamRequest) ProtoMessage() {} + +func (x *StreamRequest) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_router_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamRequest.ProtoReflect.Descriptor instead. 
+func (*StreamRequest) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_router_proto_rawDescGZIP(), []int{0} +} + +func (x *StreamRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *StreamRequest) GetFrameType() FrameType { + if x != nil { + return x.FrameType + } + return FrameType_FRAME_TYPE_DATA +} + +type StreamResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + FrameType FrameType `protobuf:"varint,2,opt,name=frame_type,json=frameType,proto3,enum=jumpstarter.v1.FrameType" json:"frame_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamResponse) Reset() { + *x = StreamResponse{} + mi := &file_jumpstarter_v1_router_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamResponse) ProtoMessage() {} + +func (x *StreamResponse) ProtoReflect() protoreflect.Message { + mi := &file_jumpstarter_v1_router_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamResponse.ProtoReflect.Descriptor instead. 
+func (*StreamResponse) Descriptor() ([]byte, []int) { + return file_jumpstarter_v1_router_proto_rawDescGZIP(), []int{1} +} + +func (x *StreamResponse) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *StreamResponse) GetFrameType() FrameType { + if x != nil { + return x.FrameType + } + return FrameType_FRAME_TYPE_DATA +} + +var File_jumpstarter_v1_router_proto protoreflect.FileDescriptor + +const file_jumpstarter_v1_router_proto_rawDesc = "" + + "\n" + + "\x1bjumpstarter/v1/router.proto\x12\x0ejumpstarter.v1\"c\n" + + "\rStreamRequest\x12\x18\n" + + "\apayload\x18\x01 \x01(\fR\apayload\x128\n" + + "\n" + + "frame_type\x18\x02 \x01(\x0e2\x19.jumpstarter.v1.FrameTypeR\tframeType\"d\n" + + "\x0eStreamResponse\x12\x18\n" + + "\apayload\x18\x01 \x01(\fR\apayload\x128\n" + + "\n" + + "frame_type\x18\x02 \x01(\x0e2\x19.jumpstarter.v1.FrameTypeR\tframeType*g\n" + + "\tFrameType\x12\x13\n" + + "\x0fFRAME_TYPE_DATA\x10\x00\x12\x19\n" + + "\x15FRAME_TYPE_RST_STREAM\x10\x03\x12\x13\n" + + "\x0fFRAME_TYPE_PING\x10\x06\x12\x15\n" + + "\x11FRAME_TYPE_GOAWAY\x10\a2\\\n" + + "\rRouterService\x12K\n" + + "\x06Stream\x12\x1d.jumpstarter.v1.StreamRequest\x1a\x1e.jumpstarter.v1.StreamResponse(\x010\x01B\xdc\x01\n" + + "\x12com.jumpstarter.v1B\vRouterProtoP\x01Z`github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/v1;jumpstarterv1\xa2\x02\x03JXX\xaa\x02\x0eJumpstarter.V1\xca\x02\x0eJumpstarter\\V1\xe2\x02\x1aJumpstarter\\V1\\GPBMetadata\xea\x02\x0fJumpstarter::V1b\x06proto3" + +var ( + file_jumpstarter_v1_router_proto_rawDescOnce sync.Once + file_jumpstarter_v1_router_proto_rawDescData []byte +) + +func file_jumpstarter_v1_router_proto_rawDescGZIP() []byte { + file_jumpstarter_v1_router_proto_rawDescOnce.Do(func() { + file_jumpstarter_v1_router_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_jumpstarter_v1_router_proto_rawDesc), len(file_jumpstarter_v1_router_proto_rawDesc))) + }) + 
return file_jumpstarter_v1_router_proto_rawDescData +} + +var file_jumpstarter_v1_router_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_jumpstarter_v1_router_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_jumpstarter_v1_router_proto_goTypes = []any{ + (FrameType)(0), // 0: jumpstarter.v1.FrameType + (*StreamRequest)(nil), // 1: jumpstarter.v1.StreamRequest + (*StreamResponse)(nil), // 2: jumpstarter.v1.StreamResponse +} +var file_jumpstarter_v1_router_proto_depIdxs = []int32{ + 0, // 0: jumpstarter.v1.StreamRequest.frame_type:type_name -> jumpstarter.v1.FrameType + 0, // 1: jumpstarter.v1.StreamResponse.frame_type:type_name -> jumpstarter.v1.FrameType + 1, // 2: jumpstarter.v1.RouterService.Stream:input_type -> jumpstarter.v1.StreamRequest + 2, // 3: jumpstarter.v1.RouterService.Stream:output_type -> jumpstarter.v1.StreamResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_jumpstarter_v1_router_proto_init() } +func file_jumpstarter_v1_router_proto_init() { + if File_jumpstarter_v1_router_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_jumpstarter_v1_router_proto_rawDesc), len(file_jumpstarter_v1_router_proto_rawDesc)), + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_jumpstarter_v1_router_proto_goTypes, + DependencyIndexes: file_jumpstarter_v1_router_proto_depIdxs, + EnumInfos: file_jumpstarter_v1_router_proto_enumTypes, + MessageInfos: file_jumpstarter_v1_router_proto_msgTypes, + }.Build() + File_jumpstarter_v1_router_proto = out.File + file_jumpstarter_v1_router_proto_goTypes = nil + 
file_jumpstarter_v1_router_proto_depIdxs = nil +} diff --git a/controller/internal/protocol/jumpstarter/v1/router_grpc.pb.go b/controller/internal/protocol/jumpstarter/v1/router_grpc.pb.go new file mode 100644 index 000000000..3bc8ee395 --- /dev/null +++ b/controller/internal/protocol/jumpstarter/v1/router_grpc.pb.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Jumpstarter Authors + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: jumpstarter/v1/router.proto + +package jumpstarterv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + RouterService_Stream_FullMethodName = "/jumpstarter.v1.RouterService/Stream" +) + +// RouterServiceClient is the client API for RouterService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+// +// StreamService +// Claims: +// iss: jumpstarter controller +// aud: jumpstarter router +// sub: jumpstarter client/exporter +// stream: stream id +type RouterServiceClient interface { + // Stream connects caller to another caller of the same stream + Stream(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[StreamRequest, StreamResponse], error) +} + +type routerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRouterServiceClient(cc grpc.ClientConnInterface) RouterServiceClient { + return &routerServiceClient{cc} +} + +func (c *routerServiceClient) Stream(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[StreamRequest, StreamResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &RouterService_ServiceDesc.Streams[0], RouterService_Stream_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[StreamRequest, StreamResponse]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type RouterService_StreamClient = grpc.BidiStreamingClient[StreamRequest, StreamResponse] + +// RouterServiceServer is the server API for RouterService service. +// All implementations must embed UnimplementedRouterServiceServer +// for forward compatibility. +// +// StreamService +// Claims: +// iss: jumpstarter controller +// aud: jumpstarter router +// sub: jumpstarter client/exporter +// stream: stream id +type RouterServiceServer interface { + // Stream connects caller to another caller of the same stream + Stream(grpc.BidiStreamingServer[StreamRequest, StreamResponse]) error + mustEmbedUnimplementedRouterServiceServer() +} + +// UnimplementedRouterServiceServer must be embedded to have +// forward compatible implementations. 
+// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedRouterServiceServer struct{} + +func (UnimplementedRouterServiceServer) Stream(grpc.BidiStreamingServer[StreamRequest, StreamResponse]) error { + return status.Errorf(codes.Unimplemented, "method Stream not implemented") +} +func (UnimplementedRouterServiceServer) mustEmbedUnimplementedRouterServiceServer() {} +func (UnimplementedRouterServiceServer) testEmbeddedByValue() {} + +// UnsafeRouterServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RouterServiceServer will +// result in compilation errors. +type UnsafeRouterServiceServer interface { + mustEmbedUnimplementedRouterServiceServer() +} + +func RegisterRouterServiceServer(s grpc.ServiceRegistrar, srv RouterServiceServer) { + // If the following call pancis, it indicates UnimplementedRouterServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&RouterService_ServiceDesc, srv) +} + +func _RouterService_Stream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RouterServiceServer).Stream(&grpc.GenericServerStream[StreamRequest, StreamResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type RouterService_StreamServer = grpc.BidiStreamingServer[StreamRequest, StreamResponse] + +// RouterService_ServiceDesc is the grpc.ServiceDesc for RouterService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RouterService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "jumpstarter.v1.RouterService", + HandlerType: (*RouterServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Stream", + Handler: _RouterService_Stream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "jumpstarter/v1/router.proto", +} diff --git a/controller/internal/service/auth/auth.go b/controller/internal/service/auth/auth.go new file mode 100644 index 000000000..9df985896 --- /dev/null +++ b/controller/internal/service/auth/auth.go @@ -0,0 +1,75 @@ +package auth + +import ( + "context" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/authentication" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/authorization" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/apiserver/pkg/authorization/authorizer" + kclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Auth struct { + client kclient.Client + authn authentication.ContextAuthenticator + authz authorizer.Authorizer + attr authorization.ContextAttributesGetter +} + +func NewAuth( + client kclient.Client, + authn authentication.ContextAuthenticator, + authz authorizer.Authorizer, + attr authorization.ContextAttributesGetter, +) *Auth { + return &Auth{ + client: client, + authn: authn, + authz: authz, + attr: attr, + } +} + +func (s *Auth) AuthClient(ctx context.Context, namespace string) (*jumpstarterdevv1alpha1.Client, error) { + jclient, err := oidc.VerifyClientObjectToken( + ctx, + s.authn, + s.authz, + s.attr, + s.client, + ) + + if err != nil { + return nil, err + } + + if namespace != jclient.Namespace { + return nil, 
status.Error(codes.PermissionDenied, "namespace mismatch") + } + + return jclient, nil +} + +func (s *Auth) AuthExporter(ctx context.Context, namespace string) (*jumpstarterdevv1alpha1.Exporter, error) { + jexporter, err := oidc.VerifyExporterObjectToken( + ctx, + s.authn, + s.authz, + s.attr, + s.client, + ) + + if err != nil { + return nil, err + } + + if namespace != jexporter.Namespace { + return nil, status.Error(codes.PermissionDenied, "namespace mismatch") + } + + return jexporter, nil +} diff --git a/controller/internal/service/client/v1/client_service.go b/controller/internal/service/client/v1/client_service.go new file mode 100644 index 000000000..90ae62c93 --- /dev/null +++ b/controller/internal/service/client/v1/client_service.go @@ -0,0 +1,322 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "github.com/google/uuid" + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/controller" + cpb "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/client/v1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/service/auth" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/service/utils" + "google.golang.org/protobuf/types/known/emptypb" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + kclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ClientService struct { + cpb.UnimplementedClientServiceServer + kclient.Client + auth.Auth +} + +func NewClientService(client kclient.Client, auth auth.Auth) *ClientService { + return &ClientService{ + Client: client, + Auth: auth, + } +} + +func (s *ClientService) GetExporter( + ctx context.Context, + req *cpb.GetExporterRequest, +) (*cpb.Exporter, error) { + key, err := utils.ParseExporterIdentifier(req.Name) + if err != nil { + return nil, err + } + + _, err = s.AuthClient(ctx, key.Namespace) + if err != nil { + return nil, err + } + + var jexporter jumpstarterdevv1alpha1.Exporter + if err := s.Get(ctx, *key, &jexporter); err != nil { + return nil, err + } + + return jexporter.ToProtobuf(), nil +} + +func (s *ClientService) ListExporters( + ctx context.Context, + req *cpb.ListExportersRequest, +) (*cpb.ListExportersResponse, error) { + namespace, err := utils.ParseNamespaceIdentifier(req.Parent) + if err != nil { + return nil, err + } + + _, err = s.AuthClient(ctx, namespace) + if err != nil { + return nil, err + } + + selector, err := labels.Parse(req.Filter) + if err != nil { + return nil, err + } + + var jexporters jumpstarterdevv1alpha1.ExporterList + if err := s.List(ctx, &jexporters, &kclient.ListOptions{ + Namespace: namespace, + LabelSelector: selector, + 
Limit: int64(req.PageSize), + Continue: req.PageToken, + }); err != nil { + return nil, err + } + + return jexporters.ToProtobuf(), nil +} + +func (s *ClientService) GetLease(ctx context.Context, req *cpb.GetLeaseRequest) (*cpb.Lease, error) { + key, err := utils.ParseLeaseIdentifier(req.Name) + if err != nil { + return nil, err + } + + _, err = s.AuthClient(ctx, key.Namespace) + if err != nil { + return nil, err + } + + var jlease jumpstarterdevv1alpha1.Lease + if err := s.Get(ctx, *key, &jlease); err != nil { + return nil, err + } + + return jlease.ToProtobuf(), nil +} + +func (s *ClientService) ListLeases(ctx context.Context, req *cpb.ListLeasesRequest) (*cpb.ListLeasesResponse, error) { + namespace, err := utils.ParseNamespaceIdentifier(req.Parent) + if err != nil { + return nil, err + } + + _, err = s.AuthClient(ctx, namespace) + if err != nil { + return nil, err + } + + selector, err := labels.Parse(req.Filter) + if err != nil { + return nil, err + } + + listOptions := []kclient.ListOption{ + kclient.InNamespace(namespace), + kclient.MatchingLabelsSelector{Selector: selector}, + kclient.Limit(int64(req.PageSize)), + kclient.Continue(req.PageToken), + } + + // Apply active-only filter by default (when only_active is nil or true) + if req.OnlyActive == nil || *req.OnlyActive { + listOptions = append(listOptions, controller.MatchingActiveLeases()) + } + + var jleases jumpstarterdevv1alpha1.LeaseList + if err := s.List(ctx, &jleases, listOptions...); err != nil { + return nil, err + } + + var results []*cpb.Lease + for _, lease := range jleases.Items { + results = append(results, lease.ToProtobuf()) + } + + return &cpb.ListLeasesResponse{ + Leases: results, + NextPageToken: jleases.Continue, + }, nil +} + +func (s *ClientService) CreateLease(ctx context.Context, req *cpb.CreateLeaseRequest) (*cpb.Lease, error) { + namespace, err := utils.ParseNamespaceIdentifier(req.Parent) + if err != nil { + return nil, err + } + + jclient, err := s.AuthClient(ctx, namespace) + 
if err != nil { + return nil, err + } + + // Use provided lease_id if specified, otherwise generate a UUIDv7 + name := req.LeaseId + if name == "" { + id, err := uuid.NewV7() + if err != nil { + return nil, err + } + name = id.String() + } + + jlease, err := jumpstarterdevv1alpha1.LeaseFromProtobuf(req.Lease, types.NamespacedName{ + Namespace: namespace, + Name: name, + }, corev1.LocalObjectReference{ + Name: jclient.Name, + }) + if err != nil { + return nil, err + } + + if err := s.Create(ctx, jlease); err != nil { + return nil, err + } + + return jlease.ToProtobuf(), nil +} + +func (s *ClientService) UpdateLease(ctx context.Context, req *cpb.UpdateLeaseRequest) (*cpb.Lease, error) { + key, err := utils.ParseLeaseIdentifier(req.Lease.Name) + if err != nil { + return nil, err + } + + jclient, err := s.AuthClient(ctx, key.Namespace) + if err != nil { + return nil, err + } + + var jlease jumpstarterdevv1alpha1.Lease + if err := s.Get(ctx, *key, &jlease); err != nil { + return nil, err + } + + if jlease.Spec.ClientRef.Name != jclient.Name { + return nil, fmt.Errorf("UpdateLease permission denied") + } + + original := kclient.MergeFrom(jlease.DeepCopy()) + + // Only parse time fields from protobuf if any are being updated + if req.Lease.BeginTime != nil || req.Lease.Duration != nil || req.Lease.EndTime != nil { + desired, err := jumpstarterdevv1alpha1.LeaseFromProtobuf(req.Lease, *key, + corev1.LocalObjectReference{ + Name: jclient.Name, + }, + ) + if err != nil { + return nil, err + } + + // BeginTime can only be updated before lease starts; only if explicitly provided + if req.Lease.BeginTime != nil { + if jlease.Status.ExporterRef != nil { + if jlease.Spec.BeginTime == nil || !jlease.Spec.BeginTime.Equal(desired.Spec.BeginTime) { + return nil, fmt.Errorf("cannot update BeginTime: lease has already started") + } + } + jlease.Spec.BeginTime = desired.Spec.BeginTime + } + // Update Duration only if provided; preserve existing otherwise + if req.Lease.Duration != nil { 
+ jlease.Spec.Duration = desired.Spec.Duration + } + // Update EndTime only if provided; preserve existing otherwise + if req.Lease.EndTime != nil { + jlease.Spec.EndTime = desired.Spec.EndTime + } + } + + // Transfer lease to a new client if specified + if req.Lease.Client != nil && *req.Lease.Client != "" { + // Only active leases can be transferred (has exporter, not ended) + if jlease.Status.ExporterRef == nil { + return nil, fmt.Errorf("cannot transfer lease: lease has not started yet") + } + if jlease.Status.Ended { + return nil, fmt.Errorf("cannot transfer lease: lease has already ended") + } + newClientKey, err := utils.ParseClientIdentifier(*req.Lease.Client) + if err != nil { + return nil, err + } + if newClientKey.Namespace != key.Namespace { + return nil, fmt.Errorf("cannot transfer lease to client in different namespace") + } + var newClient jumpstarterdevv1alpha1.Client + if err := s.Get(ctx, *newClientKey, &newClient); err != nil { + return nil, fmt.Errorf("target client not found: %w", err) + } + jlease.Spec.ClientRef.Name = newClientKey.Name + } + + // Recalculate missing field or validate consistency (only if time fields were updated) + if req.Lease.BeginTime != nil || req.Lease.Duration != nil || req.Lease.EndTime != nil { + if err := jumpstarterdevv1alpha1.ReconcileLeaseTimeFields(&jlease.Spec.BeginTime, &jlease.Spec.EndTime, &jlease.Spec.Duration); err != nil { + return nil, err + } + } + + if err := s.Patch(ctx, &jlease, original); err != nil { + return nil, err + } + + return jlease.ToProtobuf(), nil +} + +func (s *ClientService) DeleteLease(ctx context.Context, req *cpb.DeleteLeaseRequest) (*emptypb.Empty, error) { + key, err := utils.ParseLeaseIdentifier(req.Name) + if err != nil { + return nil, err + } + + jclient, err := s.AuthClient(ctx, key.Namespace) + if err != nil { + return nil, err + } + + var jlease jumpstarterdevv1alpha1.Lease + if err := s.Get(ctx, *key, &jlease); err != nil { + return nil, err + } + + if 
jlease.Spec.ClientRef.Name != jclient.Name { + return nil, fmt.Errorf("DeleteLease permission denied") + } + + original := kclient.MergeFrom(jlease.DeepCopy()) + + jlease.Spec.Release = true + + if err := s.Patch(ctx, &jlease, original); err != nil { + return nil, err + } + + return &emptypb.Empty{}, nil +} diff --git a/controller/internal/service/controller_service.go b/controller/internal/service/controller_service.go new file mode 100644 index 000000000..97c06693d --- /dev/null +++ b/controller/internal/service/controller_service.go @@ -0,0 +1,792 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "cmp" + "context" + "crypto/tls" + "fmt" + "net/http" + "os" + "slices" + "strings" + "sync" + "time" + + "golang.org/x/exp/maps" + + gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + + "github.com/golang-jwt/jwt/v5" + "github.com/google/uuid" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/authentication" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/authorization" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/config" + jlog "github.com/jumpstarter-dev/jumpstarter-controller/internal/log" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" + cpb "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/client/v1" + pb "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/v1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/service/auth" + clientsvcv1 "github.com/jumpstarter-dev/jumpstarter-controller/internal/service/client/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/reflection" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + k8suuid "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/authorization/authorizer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/controller" + 
"google.golang.org/protobuf/proto" +) + +// ControllerService exposes a gRPC service +type ControllerService struct { + pb.UnimplementedControllerServiceServer + Client client.WithWatch + Scheme *runtime.Scheme + Authn authentication.ContextAuthenticator + Authz authorizer.Authorizer + Attr authorization.ContextAttributesGetter + ServerOption grpc.ServerOption + Router config.Router + listenQueues sync.Map +} + +type wrappedStream struct { + grpc.ServerStream +} + +func logContext(ctx context.Context) context.Context { + p, ok := peer.FromContext(ctx) + if ok { + return log.IntoContext(ctx, log.FromContext(ctx, "peer", p.Addr)) + } + return ctx +} + +func (w *wrappedStream) Context() context.Context { + return logContext(w.ServerStream.Context()) +} + +func (s *ControllerService) authenticateClient(ctx context.Context) (*jumpstarterdevv1alpha1.Client, error) { + return oidc.VerifyClientObjectToken( + ctx, + s.Authn, + s.Authz, + s.Attr, + s.Client, + ) +} + +func (s *ControllerService) authenticateExporter(ctx context.Context) (*jumpstarterdevv1alpha1.Exporter, error) { + return oidc.VerifyExporterObjectToken( + ctx, + s.Authn, + s.Authz, + s.Attr, + s.Client, + ) +} + +func (s *ControllerService) Register(ctx context.Context, req *pb.RegisterRequest) (*pb.RegisterResponse, error) { + logger := log.FromContext(ctx) + + exporter, err := s.authenticateExporter(ctx) + if err != nil { + logger.Info("unable to authenticate exporter", "error", err.Error()) + return nil, err + } + + logger = logger.WithValues("exporter", types.NamespacedName{ + Namespace: exporter.Namespace, + Name: exporter.Name, + }) + + logger.Info("Registering exporter") + + original := client.MergeFrom(exporter.DeepCopy()) + + if exporter.Labels == nil { + exporter.Labels = make(map[string]string) + } + + for k := range exporter.Labels { + if strings.HasPrefix(k, "jumpstarter.dev/") { + delete(exporter.Labels, k) + } + } + + for k, v := range req.Labels { + if strings.HasPrefix(k, "jumpstarter.dev/") 
{ + exporter.Labels[k] = v + } + } + + if err := s.Client.Patch(ctx, exporter, original); err != nil { + logger.Error(err, "unable to update exporter") + return nil, status.Errorf(codes.Internal, "unable to update exporter: %s", err) + } + + original = client.MergeFrom(exporter.DeepCopy()) + + devices := []jumpstarterdevv1alpha1.Device{} + for _, device := range req.Reports { + devices = append(devices, jumpstarterdevv1alpha1.Device{ + Uuid: device.Uuid, + ParentUuid: device.ParentUuid, + Labels: device.Labels, + }) + } + exporter.Status.Devices = devices + + if err := s.Client.Status().Patch(ctx, exporter, original); err != nil { + logger.Error(err, "unable to update exporter status") + return nil, status.Errorf(codes.Internal, "unable to update exporter status: %s", err) + } + + return &pb.RegisterResponse{ + Uuid: string(exporter.UID), + }, nil +} + +func (s *ControllerService) Unregister( + ctx context.Context, + req *pb.UnregisterRequest, +) ( + *pb.UnregisterResponse, + error, +) { + logger := log.FromContext(ctx) + + exporter, err := s.authenticateExporter(ctx) + if err != nil { + logger.Error(err, "unable to authenticate exporter") + return nil, err + } + + logger = logger.WithValues("exporter", types.NamespacedName{ + Namespace: exporter.Namespace, + Name: exporter.Name, + }) + + original := client.MergeFrom(exporter.DeepCopy()) + exporter.Status.Devices = nil + + if err := s.Client.Status().Patch(ctx, exporter, original); err != nil { + logger.Error(err, "unable to update exporter status") + return nil, status.Errorf(codes.Internal, "unable to update exporter status: %s", err) + } + + logger.Info("exporter unregistered, updated as unavailable") + + return &pb.UnregisterResponse{}, nil +} + +func (s *ControllerService) Listen(req *pb.ListenRequest, stream pb.ControllerService_ListenServer) error { + ctx := stream.Context() + logger := log.FromContext(ctx) + + exporter, err := s.authenticateExporter(ctx) + if err != nil { + return err + } + + logger = 
logger.WithValues("exporter", types.NamespacedName{ + Namespace: exporter.Namespace, + Name: exporter.Name, + }) + + leaseName := req.GetLeaseName() + if leaseName == "" { + err := fmt.Errorf("empty lease name") + logger.Error(err, "lease name not specified in dial request") + return err + } + + logger.WithValues("lease", types.NamespacedName{ + Namespace: exporter.Namespace, + Name: leaseName, + }) + + var lease jumpstarterdevv1alpha1.Lease + if err := s.Client.Get( + ctx, + types.NamespacedName{Namespace: exporter.Namespace, Name: leaseName}, + &lease, + ); err != nil { + logger.Error(err, "unable to get lease") + return err + } + + if lease.Status.ExporterRef == nil || lease.Status.ExporterRef.Name != exporter.Name { + err := fmt.Errorf("permission denied") + logger.Error(err, "lease not held by exporter") + return err + } + + queue, _ := s.listenQueues.LoadOrStore(leaseName, make(chan *pb.ListenResponse, 8)) + for { + select { + case <-ctx.Done(): + return nil + case msg := <-queue.(chan *pb.ListenResponse): + if err := stream.Send(msg); err != nil { + return err + } + } + } +} + +// Status is a stream of status updates for the exporter. 
+// It is used to: +// - Notify the exporter of the current status of the lease +// - Track the exporter's last seen time +func (s *ControllerService) Status(req *pb.StatusRequest, stream pb.ControllerService_StatusServer) error { + ctx := stream.Context() + logger := log.FromContext(ctx) + + exporter, err := s.authenticateExporter(ctx) + if err != nil { + logger.Error(err, "unable to authenticate exporter") + return err + } + + logger = logger.WithValues("exporter", types.NamespacedName{ + Namespace: exporter.Namespace, + Name: exporter.Name, + }) + + watcher, err := s.Client.Watch(ctx, &jumpstarterdevv1alpha1.ExporterList{}, &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", exporter.Name), + Namespace: exporter.Namespace, + }) + if err != nil { + logger.Error(err, "failed to watch exporter") + return err + } + + defer watcher.Stop() + + ticker := time.NewTicker(time.Second * 10) + + defer ticker.Stop() + + // use this to track that we are getting updates from the k8s watcher + var watchedLastSeen *metav1.Time + + online := func() { + original := client.MergeFrom(exporter.DeepCopy()) + exporter.Status.LastSeen = metav1.Now() + + if err = s.Client.Status().Patch(ctx, exporter, original); err != nil { + logger.Error(err, "unable to update exporter status.lastSeen") + } + } + + // ticker does not tick instantly, thus calling online immediately once + // https://github.com/golang/go/issues/17601 + select { + case <-ctx.Done(): + return nil + default: + online() + } + + var lastPbStatusResponse *pb.StatusResponse + for { + select { + case <-ctx.Done(): + logger.Info("Status stream terminated normally") + return nil + case <-ticker.C: + // the k8s watchers sometimes stop functioning silently, so we need to detect it + // by comparing the last seen time from the k8s watcher with the last seen time + // from the exporter object we set in the online() function + if watchedLastSeen != nil && !watchedLastSeen.Equal(&exporter.Status.LastSeen) 
{ + logger.Info("The exporter watcher seems to have stopped, terminating status stream") + return fmt.Errorf("last seen time mismatch") + } + online() + case result, ok := <-watcher.ResultChan(): + // Check if the watch channel has been closed + if !ok { + logger.Info("Watch channel closed, terminating status stream") + return fmt.Errorf("watch channel closed") + } + + switch result.Type { + case watch.Added, watch.Modified, watch.Deleted: + exporter = result.Object.(*jumpstarterdevv1alpha1.Exporter) + // track the last seen time from the k8s watcher, so we can detect if + // the watcher stops functioning + watchedLastSeen = exporter.Status.LastSeen.DeepCopy() + + leased := exporter.Status.LeaseRef != nil + leaseName := (*string)(nil) + clientName := (*string)(nil) + + if leased { + leaseName = &exporter.Status.LeaseRef.Name + var lease jumpstarterdevv1alpha1.Lease + if err := s.Client.Get( + ctx, + types.NamespacedName{Namespace: exporter.Namespace, Name: *leaseName}, + &lease, + ); err != nil { + logger.Error(err, "failed to get lease on exporter") + return err + } + clientName = &lease.Spec.ClientRef.Name + } + + status := pb.StatusResponse{ + Leased: leased, + LeaseName: leaseName, + ClientName: clientName, + } + if proto.Equal(lastPbStatusResponse, &status) { + jlog.Verbose(logger, "Not sending status update to exporter, it is the same as the last one") + } else { + logger.Info("Sending status update to exporter", "status", fmt.Sprintf("%+v", &status)) + if err = stream.Send(&status); err != nil { + logger.Error(err, "Failed to send status update to exporter") + return err + } + lastPbStatusResponse = proto.Clone(&status).(*pb.StatusResponse) + } + case watch.Error: + logger.Error(fmt.Errorf("%+v", result.Object), "Received error when watching exporter") + return fmt.Errorf("received error when watching exporter") + } + } + } +} + +func (s *ControllerService) Dial(ctx context.Context, req *pb.DialRequest) (*pb.DialResponse, error) { + logger := 
log.FromContext(ctx) + + client, err := s.authenticateClient(ctx) + if err != nil { + logger.Error(err, "unable to authenticate client") + return nil, err + } + + logger = logger.WithValues("client", types.NamespacedName{ + Namespace: client.Namespace, + Name: client.Name, + }) + + leaseName := req.GetLeaseName() + if leaseName == "" { + err := fmt.Errorf("empty lease name") + logger.Error(err, "lease name not specified in dial request") + return nil, err + } + + logger = logger.WithValues("lease", types.NamespacedName{ + Namespace: client.Namespace, + Name: leaseName, + }) + + var lease jumpstarterdevv1alpha1.Lease + if err := s.Client.Get( + ctx, + types.NamespacedName{Namespace: client.Namespace, Name: leaseName}, + &lease, + ); err != nil { + logger.Error(err, "unable to get lease") + return nil, err + } + + if lease.Spec.ClientRef.Name != client.Name { + err := fmt.Errorf("permission denied") + logger.Error(err, "lease not held by client") + return nil, err + } + + if lease.Status.ExporterRef == nil { + err := fmt.Errorf("lease not active") + logger.Error(err, "unable to get exporter referenced by lease") + return nil, err + } + + var exporter jumpstarterdevv1alpha1.Exporter + if err := s.Client.Get(ctx, + types.NamespacedName{Namespace: client.Namespace, Name: lease.Status.ExporterRef.Name}, &exporter); err != nil { + logger.Error(err, "unable to get exporter referenced by lease") + return nil, err + } + + candidates := maps.Values(s.Router) + slices.SortFunc(candidates, func(a config.RouterEntry, b config.RouterEntry) int { + return -cmp.Compare(MatchLabels(a.Labels, exporter.Labels), MatchLabels(b.Labels, exporter.Labels)) + }) + + if len(candidates) == 0 { + err := fmt.Errorf("no router available") + logger.Error(err, "no router available") + return nil, err + } + + logger.Info("selected router", "endpoint", candidates[0].Endpoint, "labels", candidates[0].Labels) + + endpoint := candidates[0].Endpoint + + stream := k8suuid.NewUUID() + + token, err := 
jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.RegisteredClaims{ + Issuer: "https://jumpstarter.dev/stream", + Subject: string(stream), + Audience: []string{"https://jumpstarter.dev/router"}, + ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Minute * 30)), + NotBefore: jwt.NewNumericDate(time.Now()), + IssuedAt: jwt.NewNumericDate(time.Now()), + ID: string(k8suuid.NewUUID()), + }).SignedString([]byte(os.Getenv("ROUTER_KEY"))) + + if err != nil { + logger.Error(err, "unable to sign token") + return nil, status.Errorf(codes.Internal, "unable to sign token") + } + + response := &pb.ListenResponse{ + RouterEndpoint: endpoint, + RouterToken: token, + } + + queue, _ := s.listenQueues.LoadOrStore(leaseName, make(chan *pb.ListenResponse, 8)) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case queue.(chan *pb.ListenResponse) <- response: + } + + logger.Info("Client dial assigned stream", "stream", stream) + return &pb.DialResponse{ + RouterEndpoint: endpoint, + RouterToken: token, + }, nil +} + +func (s *ControllerService) GetLease( + ctx context.Context, + req *pb.GetLeaseRequest, +) (*pb.GetLeaseResponse, error) { + client, err := s.authenticateClient(ctx) + if err != nil { + return nil, err + } + + var lease jumpstarterdevv1alpha1.Lease + if err := s.Client.Get(ctx, types.NamespacedName{ + Namespace: client.Namespace, + Name: req.Name, + }, &lease); err != nil { + return nil, err + } + + if lease.Spec.ClientRef.Name != client.Name { + return nil, fmt.Errorf("GetLease permission denied") + } + + var matchExpressions []*pb.LabelSelectorRequirement + for _, exp := range lease.Spec.Selector.MatchExpressions { + matchExpressions = append(matchExpressions, &pb.LabelSelectorRequirement{ + Key: exp.Key, + Operator: string(exp.Operator), + Values: exp.Values, + }) + } + + var beginTime *timestamppb.Timestamp + if lease.Status.BeginTime != nil { + beginTime = timestamppb.New(lease.Status.BeginTime.Time) + } + var endTime *timestamppb.Timestamp + if lease.Status.EndTime 
!= nil { + endTime = timestamppb.New(lease.Status.EndTime.Time) + } + var exporterUuid *string + if lease.Status.ExporterRef != nil { + var exporter jumpstarterdevv1alpha1.Exporter + if err := s.Client.Get( + ctx, + types.NamespacedName{Namespace: client.Namespace, Name: lease.Status.ExporterRef.Name}, + &exporter, + ); err != nil { + return nil, fmt.Errorf("GetLease fetch exporter uuid failed") + } + exporterUuid = (*string)(&exporter.UID) + } + + var conditions []*pb.Condition + for _, condition := range lease.Status.Conditions { + conditions = append(conditions, &pb.Condition{ + Type: &condition.Type, + Status: (*string)(&condition.Status), + ObservedGeneration: &condition.ObservedGeneration, + LastTransitionTime: &pb.Time{ + Seconds: &condition.LastTransitionTime.ProtoTime().Seconds, + Nanos: &condition.LastTransitionTime.ProtoTime().Nanos, + }, + Reason: &condition.Reason, + Message: &condition.Message, + }) + } + + resp := &pb.GetLeaseResponse{ + Selector: &pb.LabelSelector{MatchExpressions: matchExpressions, MatchLabels: lease.Spec.Selector.MatchLabels}, + BeginTime: beginTime, + EndTime: endTime, + ExporterUuid: exporterUuid, + Conditions: conditions, + } + if lease.Spec.Duration != nil { + resp.Duration = durationpb.New(lease.Spec.Duration.Duration) + } + return resp, nil +} + +func (s *ControllerService) RequestLease( + ctx context.Context, + req *pb.RequestLeaseRequest, +) (*pb.RequestLeaseResponse, error) { + client, err := s.authenticateClient(ctx) + if err != nil { + return nil, err + } + + var matchLabels map[string]string + var matchExpressions []metav1.LabelSelectorRequirement + if req.Selector != nil { + matchLabels = req.Selector.MatchLabels + for _, exp := range req.Selector.MatchExpressions { + matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{ + Key: exp.Key, + Operator: metav1.LabelSelectorOperator(exp.Operator), + Values: exp.Values, + }) + } + } + + leaseName, err := uuid.NewV7() + if err != nil { + return nil, err 
+ } + + var lease = jumpstarterdevv1alpha1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: client.Namespace, + Name: leaseName.String(), + }, + Spec: jumpstarterdevv1alpha1.LeaseSpec{ + ClientRef: corev1.LocalObjectReference{ + Name: client.Name, + }, + Selector: metav1.LabelSelector{ + MatchLabels: matchLabels, + MatchExpressions: matchExpressions, + }, + }, + } + if req.Duration != nil { + lease.Spec.Duration = &metav1.Duration{Duration: req.Duration.AsDuration()} + } + if err := s.Client.Create(ctx, &lease); err != nil { + return nil, err + } + + return &pb.RequestLeaseResponse{ + Name: lease.Name, + }, nil +} + +func (s *ControllerService) ReleaseLease( + ctx context.Context, + req *pb.ReleaseLeaseRequest, +) (*pb.ReleaseLeaseResponse, error) { + jclient, err := s.authenticateClient(ctx) + if err != nil { + return nil, err + } + + var lease jumpstarterdevv1alpha1.Lease + if err := s.Client.Get(ctx, types.NamespacedName{ + Namespace: jclient.Namespace, + Name: req.Name, + }, &lease); err != nil { + return nil, err + } + + if lease.Spec.ClientRef.Name != jclient.Name { + return nil, fmt.Errorf("ReleaseLease permission denied") + } + + original := client.MergeFrom(lease.DeepCopy()) + lease.Spec.Release = true + + if err := s.Client.Patch(ctx, &lease, original); err != nil { + return nil, err + } + + return &pb.ReleaseLeaseResponse{}, nil +} + +func (s *ControllerService) ListLeases( + ctx context.Context, + req *pb.ListLeasesRequest, +) (*pb.ListLeasesResponse, error) { + jclient, err := s.authenticateClient(ctx) + if err != nil { + return nil, err + } + + var leases jumpstarterdevv1alpha1.LeaseList + if err := s.Client.List( + ctx, + &leases, + client.InNamespace(jclient.Namespace), + controller.MatchingActiveLeases(), + ); err != nil { + return nil, err + } + + var leaseNames []string + for _, lease := range leases.Items { + if lease.Spec.ClientRef.Name == jclient.Name { + leaseNames = append(leaseNames, lease.Name) + } + } + + return 
&pb.ListLeasesResponse{ + Names: leaseNames, + }, nil +} + +func (s *ControllerService) Start(ctx context.Context) error { + logger := log.FromContext(ctx) + + dnsnames, ipaddresses, err := endpointToSAN(controllerEndpoint()) + if err != nil { + return err + } + + // Load external certificate if provided via environment variables. + // Environment variables EXTERNAL_CERT_PEM and EXTERNAL_KEY_PEM should contain the PEM-encoded + // certificate and private key respectively. If both are set, they are used; otherwise + // a self-signed certificate is generated. + var cert *tls.Certificate + certPEMPath := os.Getenv("EXTERNAL_CERT_PEM") + keyPEMPath := os.Getenv("EXTERNAL_KEY_PEM") + if certPEMPath != "" && keyPEMPath != "" { + certPEMBytes, err := os.ReadFile(certPEMPath) + if err != nil { + return fmt.Errorf("failed to read external certificate file: %w", err) + } + keyPEMBytes, err := os.ReadFile(keyPEMPath) + if err != nil { + return fmt.Errorf("failed to read external key file: %w", err) + } + parsedCert, err := tls.X509KeyPair(certPEMBytes, keyPEMBytes) + if err != nil { + return fmt.Errorf("failed to parse external certificate: %w", err) + } + cert = &parsedCert + } else { + cert, err = NewSelfSignedCertificate("jumpstarter controller", dnsnames, ipaddresses) + if err != nil { + return err + } + } + + server := grpc.NewServer( + s.ServerOption, + grpc.ChainUnaryInterceptor(func( + gctx context.Context, + req any, + _ *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (resp any, err error) { + return handler(logContext(gctx), req) + }, recovery.UnaryServerInterceptor()), + grpc.ChainStreamInterceptor(func( + srv any, + ss grpc.ServerStream, + _ *grpc.StreamServerInfo, + handler grpc.StreamHandler, + ) error { + return handler(srv, &wrappedStream{ServerStream: ss}) + }, recovery.StreamServerInterceptor()), + ) + + pb.RegisterControllerServiceServer(server, s) + cpb.RegisterClientServiceServer( + server, + clientsvcv1.NewClientService(s.Client, 
*auth.NewAuth(s.Client, s.Authn, s.Authz, s.Attr)), + ) + + // Register reflection service on gRPC server. + reflection.Register(server) + + // Register gRPC gateway + gwmux := gwruntime.NewServeMux() + + listener, err := tls.Listen("tcp", ":8082", &tls.Config{ + Certificates: []tls.Certificate{*cert}, + NextProtos: []string{"http/1.1", "h2"}, + }) + if err != nil { + return err + } + + logger.Info("Starting Controller grpc service on port 8082") + + go func() { + <-ctx.Done() + logger.Info("Stopping Controller gRPC service") + server.Stop() + }() + + return http.Serve(listener, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.ProtoMajor == 2 && strings.HasPrefix( + r.Header.Get("Content-Type"), "application/grpc") { + server.ServeHTTP(w, r) + } else { + gwmux.ServeHTTP(w, r) + } + })) +} + +// SetupWithManager sets up the controller with the Manager. +func (s *ControllerService) SetupWithManager(mgr ctrl.Manager) error { + return mgr.Add(s) +} diff --git a/controller/internal/service/dashboard_service.go b/controller/internal/service/dashboard_service.go new file mode 100644 index 000000000..ac3d03fa8 --- /dev/null +++ b/controller/internal/service/dashboard_service.go @@ -0,0 +1,82 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "context" + "embed" + "html/template" + "net/http" + + "github.com/gin-gonic/gin" + jumpstarterdevv1alpha1 "github.com/jumpstarter-dev/jumpstarter-controller/api/v1alpha1" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +//go:embed templates/* +var fs embed.FS + +type DashboardService struct { + client.Client + Scheme *runtime.Scheme +} + +func (s *DashboardService) Start(ctx context.Context) error { + r := gin.Default() + + r.SetHTMLTemplate(template.Must(template.ParseFS(fs, "templates/*"))) + + r.GET("/", func(c *gin.Context) { + var exporters jumpstarterdevv1alpha1.ExporterList + if err := s.List(ctx, &exporters); err != nil { + c.String(http.StatusInternalServerError, err.Error()) + return + } + + var clients jumpstarterdevv1alpha1.ClientList + if err := s.List(ctx, &clients); err != nil { + c.String(http.StatusInternalServerError, err.Error()) + return + } + + var leases jumpstarterdevv1alpha1.LeaseList + if err := s.List(ctx, &leases); err != nil { + c.String(http.StatusInternalServerError, err.Error()) + return + } + + c.HTML(http.StatusOK, "index.html", map[string]interface{}{ + "Exporters": exporters.Items, + "Clients": clients.Items, + "Leases": leases.Items, + }) + }) + + return r.Run(":8084") +} + +func (s *DashboardService) NeedLeaderElection() bool { + return false +} + +// SetupWithManager sets up the controller with the Manager. 
+func (s *DashboardService) SetupWithManager(mgr ctrl.Manager) error { + return mgr.Add(s) +} diff --git a/controller/internal/service/endpoints.go b/controller/internal/service/endpoints.go new file mode 100644 index 000000000..93d0cc379 --- /dev/null +++ b/controller/internal/service/endpoints.go @@ -0,0 +1,35 @@ +package service + +import ( + "net" + "os" +) + +func controllerEndpoint() string { + ep := os.Getenv("GRPC_ENDPOINT") + if ep == "" { + return "localhost:8082" + } + return ep +} + +func routerEndpoint() string { + ep := os.Getenv("GRPC_ROUTER_ENDPOINT") + if ep == "" { + return "localhost:8083" + } + return ep +} + +func endpointToSAN(endpoint string) ([]string, []net.IP, error) { + host, _, err := net.SplitHostPort(endpoint) + if err != nil { + return nil, nil, err + } + ip := net.ParseIP(host) + if ip != nil { + return []string{}, []net.IP{ip}, nil + } else { + return []string{host}, []net.IP{}, nil + } +} diff --git a/controller/internal/service/helpers.go b/controller/internal/service/helpers.go new file mode 100644 index 000000000..682eb9510 --- /dev/null +++ b/controller/internal/service/helpers.go @@ -0,0 +1,13 @@ +package service + +func MatchLabels(candidate map[string]string, target map[string]string) int { + count := 0 + for k, vt := range target { + if vc, ok := candidate[k]; ok && vc == vt { + count += 1 + } else { + return -1 + } + } + return count +} diff --git a/controller/internal/service/oidc_service.go b/controller/internal/service/oidc_service.go new file mode 100644 index 000000000..c2a45d713 --- /dev/null +++ b/controller/internal/service/oidc_service.go @@ -0,0 +1,43 @@ +package service + +import ( + "context" + "crypto/tls" + "net" + + "github.com/gin-gonic/gin" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/oidc" + ctrl "sigs.k8s.io/controller-runtime" +) + +// RouterService exposes a gRPC service +type OIDCService struct { + Signer *oidc.Signer + Cert *tls.Certificate +} + +func (s *OIDCService) Start(ctx 
context.Context) error { + r := gin.Default() + + s.Signer.Register(r) + + lis, err := net.Listen("tcp", "127.0.0.1:8085") + if err != nil { + return err + } + + tlslis := tls.NewListener(lis, &tls.Config{ + Certificates: []tls.Certificate{*s.Cert}, + }) + + return r.RunListener(tlslis) +} + +func (s *OIDCService) NeedLeaderElection() bool { + return false +} + +// SetupWithManager sets up the controller with the Manager. +func (s *OIDCService) SetupWithManager(mgr ctrl.Manager) error { + return mgr.Add(s) +} diff --git a/controller/internal/service/router_service.go b/controller/internal/service/router_service.go new file mode 100644 index 000000000..d3c94d418 --- /dev/null +++ b/controller/internal/service/router_service.go @@ -0,0 +1,176 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "os" + "sync" + + "github.com/golang-jwt/jwt/v5" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" + "github.com/jumpstarter-dev/jumpstarter-controller/internal/authentication" + pb "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/reflection" + "google.golang.org/grpc/status" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// RouterService exposes a gRPC service +type RouterService struct { + pb.UnimplementedRouterServiceServer + ServerOption grpc.ServerOption + pending sync.Map +} + +type streamContext struct { + cancel context.CancelFunc + stream pb.RouterService_StreamServer +} + +func (s *RouterService) authenticate(ctx context.Context) (string, error) { + token, err := authentication.BearerTokenFromContext(ctx) + if err != nil { + return "", err + } + + parsed, err := jwt.ParseWithClaims( + token, + &jwt.RegisteredClaims{}, + func(t *jwt.Token) (any, error) { return []byte(os.Getenv("ROUTER_KEY")), nil }, + jwt.WithIssuer("https://jumpstarter.dev/stream"), + jwt.WithAudience("https://jumpstarter.dev/router"), + jwt.WithIssuedAt(), + jwt.WithExpirationRequired(), + jwt.WithValidMethods([]string{ + jwt.SigningMethodHS256.Name, + jwt.SigningMethodHS384.Name, + jwt.SigningMethodHS512.Name, + }), + ) + + if err != nil || !parsed.Valid { + return "", status.Errorf(codes.InvalidArgument, "invalid jwt token") + } + + return parsed.Claims.GetSubject() +} + +func (s *RouterService) Stream(stream pb.RouterService_StreamServer) error { + ctx := stream.Context() + logger := log.FromContext(ctx) + + streamName, err := s.authenticate(ctx) + if err != nil { + logger.Error(err, "failed to authenticate") + return err + } + + logger.Info("streaming", "stream", streamName) + + 
ctx, cancel := context.WithCancel(ctx) + defer cancel() + + sctx := streamContext{ + cancel: cancel, + stream: stream, + } + + actual, loaded := s.pending.LoadOrStore(streamName, sctx) + if loaded { + defer actual.(streamContext).cancel() + logger.Info("forwarding", "stream", streamName) + return Forward(ctx, stream, actual.(streamContext).stream) + } else { + logger.Info("waiting for the other side", "stream", streamName) + <-ctx.Done() + return nil + } +} + +func (s *RouterService) Start(ctx context.Context) error { + log := log.FromContext(ctx) + + dnsnames, ipaddresses, err := endpointToSAN(routerEndpoint()) + if err != nil { + return err + } + + // Handle external certificate if provided via environment variables. + // Environment variables EXTERNAL_CERT_PEM and EXTERNAL_KEY_PEM should contain the PEM-encoded + // certificate and private key respectively. If both are set, they are used; otherwise + // a self-signed certificate is generated. + var cert *tls.Certificate + certPEMPath := os.Getenv("EXTERNAL_CERT_PEM") + keyPEMPath := os.Getenv("EXTERNAL_KEY_PEM") + if certPEMPath != "" && keyPEMPath != "" { + certPEMBytes, err := os.ReadFile(certPEMPath) + if err != nil { + return fmt.Errorf("failed to read external certificate file: %w", err) + } + keyPEMBytes, err := os.ReadFile(keyPEMPath) + if err != nil { + return fmt.Errorf("failed to read external key file: %w", err) + } + parsedCert, err := tls.X509KeyPair(certPEMBytes, keyPEMBytes) + if err != nil { + return fmt.Errorf("failed to parse external certificate: %w", err) + } + cert = &parsedCert + } else { + cert, err = NewSelfSignedCertificate("jumpstarter router", dnsnames, ipaddresses) + if err != nil { + return err + } + } + + server := grpc.NewServer( + grpc.Creds(credentials.NewServerTLSFromCert(cert)), + grpc.ChainUnaryInterceptor(recovery.UnaryServerInterceptor()), + grpc.ChainStreamInterceptor(recovery.StreamServerInterceptor()), + s.ServerOption, + ) + + pb.RegisterRouterServiceServer(server, s) + 
+ reflection.Register(server) + listener, err := net.Listen("tcp", ":8083") + if err != nil { + return err + } + + log.Info("Starting grpc router service on port 8083") + go func() { + <-ctx.Done() + log.Info("Stopping grpc router service") + server.Stop() + }() + + return server.Serve(listener) +} + +// SetupWithManager sets up the controller with the Manager. +func (s *RouterService) SetupWithManager(mgr ctrl.Manager) error { + return mgr.Add(s) +} diff --git a/controller/internal/service/router_support.go b/controller/internal/service/router_support.go new file mode 100644 index 000000000..705e080ec --- /dev/null +++ b/controller/internal/service/router_support.go @@ -0,0 +1,46 @@ +package service + +import ( + "context" + "errors" + "io" + + pb "github.com/jumpstarter-dev/jumpstarter-controller/internal/protocol/jumpstarter/v1" + "golang.org/x/sync/errgroup" +) + +func pipe(a pb.RouterService_StreamServer, b pb.RouterService_StreamServer) error { + for { + msg, err := a.Recv() + if errors.Is(err, io.EOF) { + return nil + } + if err != nil { + return err + } + err = b.Send(&pb.StreamResponse{ + Payload: msg.GetPayload(), + FrameType: msg.GetFrameType(), + }) + if err != nil { + return err + } + } +} + +func Forward(ctx context.Context, a pb.RouterService_StreamServer, b pb.RouterService_StreamServer) error { + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { return pipe(a, b) }) + g.Go(func() error { return pipe(b, a) }) + // In case both tasks return nil + // Reference: https://pkg.go.dev/golang.org/x/sync/errgroup#WithContext + // The derived Context is canceled the first time a function + // passed to Go returns a non-nil error or the first time + // Wait returns, whichever occurs first. 
+ go func() { + _ = g.Wait() + }() + // Return on first error + <-ctx.Done() + return g.Wait() +} diff --git a/controller/internal/service/selfsigned.go b/controller/internal/service/selfsigned.go new file mode 100644 index 000000000..7dd926bbd --- /dev/null +++ b/controller/internal/service/selfsigned.go @@ -0,0 +1,40 @@ +package service + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "net" + "time" +) + +func NewSelfSignedCertificate(commonName string, dnsnames []string, ipaddresses []net.IP) (*tls.Certificate, error) { + template := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: commonName}, + Issuer: pkix.Name{CommonName: commonName}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + BasicConstraintsValid: true, + DNSNames: dnsnames, + IPAddresses: ipaddresses, + } + + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, err + } + + certificate, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return nil, err + } + + return &tls.Certificate{ + Certificate: [][]byte{certificate}, + PrivateKey: priv, + }, nil +} diff --git a/controller/internal/service/templates/index.html b/controller/internal/service/templates/index.html new file mode 100644 index 000000000..633a1d016 --- /dev/null +++ b/controller/internal/service/templates/index.html @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + +

Exporters

+ + + + + + + + + {{ range .Exporters }} + + + + + {{ end }} + +
NamespaceName
{{ .Namespace }}{{ .Name }}
+

Clients

+ + + + + + + + + {{ range .Clients }} + + + + + {{ end }} + +
NamespaceName
{{ .Namespace }}{{ .Name }}
+

Leases

+ + + + + + + + + + + + + + {{ range .Leases }} + + + + + + {{ if .Status.ExporterRef }} + + {{ else }} + + {{ end }} + + + + {{ if .Status.BeginTime }} + + {{ else }} + + {{ end }} + + {{ if .Status.EndTime }} + + {{ else }} + + {{ end }} + + {{ end }} + +
NamespaceNameClientExporterEndedBegin timeEnd time
{{ .Namespace }}{{ .Name }}{{ .Spec.ClientRef.Name }}{{ .Status.ExporterRef.Name }}{{ .Status.Ended }}{{ .Status.BeginTime }}{{ .Status.EndTime }}
+ + diff --git a/controller/internal/service/utils/identifier.go b/controller/internal/service/utils/identifier.go new file mode 100644 index 000000000..1f61b5332 --- /dev/null +++ b/controller/internal/service/utils/identifier.go @@ -0,0 +1,95 @@ +package utils + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + kclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +func ParseNamespaceIdentifier(identifier string) (namespace string, err error) { + segments := strings.Split(identifier, "/") + + if len(segments) != 2 { + return "", status.Errorf( + codes.InvalidArgument, + "invalid number of segments in identifier \"%s\", expecting 2, got %d", + identifier, + len(segments), + ) + } + + if segments[0] != "namespaces" { + return "", status.Errorf( + codes.InvalidArgument, + "invalid first segment in identifier \"%s\", expecting \"namespaces\", got \"%s\"", + identifier, + segments[0], + ) + } + + return segments[1], nil +} + +func ParseObjectIdentifier(identifier string, kind string) (key *kclient.ObjectKey, err error) { + segments := strings.Split(identifier, "/") + + if len(segments) != 4 { + return nil, status.Errorf( + codes.InvalidArgument, + "invalid number of segments in identifier \"%s\", expecting 4, got %d", + identifier, + len(segments), + ) + } + + if segments[0] != "namespaces" { + return nil, status.Errorf( + codes.InvalidArgument, + "invalid first segment in identifier \"%s\", expecting \"namespaces\", got \"%s\"", + identifier, + segments[0], + ) + } + + if segments[2] != kind { + return nil, status.Errorf( + codes.InvalidArgument, + "invalid third segment in identifier \"%s\", expecting \"%s\", got \"%s\"", + identifier, + kind, + segments[2], + ) + } + + return &kclient.ObjectKey{ + Namespace: segments[1], + Name: segments[3], + }, nil +} + +func UnparseObjectIdentifier(key kclient.ObjectKey, kind string) string { + return fmt.Sprintf("namespaces/%s/%s/%s", key.Namespace, kind, key.Name) +} + 
+func ParseExporterIdentifier(identifier string) (key *kclient.ObjectKey, err error) { + return ParseObjectIdentifier(identifier, "exporters") +} + +func UnparseExporterIdentifier(key kclient.ObjectKey) string { + return UnparseObjectIdentifier(key, "exporters") +} + +func ParseLeaseIdentifier(identifier string) (key *kclient.ObjectKey, err error) { + return ParseObjectIdentifier(identifier, "leases") +} + +func UnparseLeaseIdentifier(key kclient.ObjectKey) string { + return UnparseObjectIdentifier(key, "leases") +} + +func ParseClientIdentifier(identifier string) (key *kclient.ObjectKey, err error) { + return ParseObjectIdentifier(identifier, "clients") +} diff --git a/controller/test/e2e/e2e_suite_test.go b/controller/test/e2e/e2e_suite_test.go new file mode 100644 index 000000000..7b80aafa0 --- /dev/null +++ b/controller/test/e2e/e2e_suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// Run e2e tests using the Ginkgo runner. +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + _, _ = fmt.Fprintf(GinkgoWriter, "Starting jumpstarter-router suite\n") + RunSpecs(t, "e2e suite") +} diff --git a/controller/test/e2e/e2e_test.go b/controller/test/e2e/e2e_test.go new file mode 100644 index 000000000..2d17e7210 --- /dev/null +++ b/controller/test/e2e/e2e_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2024. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os/exec" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/jumpstarter-dev/jumpstarter-controller/test/utils" +) + +const namespace = "jumpstarter-router-system" + +var _ = Describe("controller", Ordered, func() { + BeforeAll(func() { + By("installing prometheus operator") + Expect(utils.InstallPrometheusOperator()).To(Succeed()) + + By("installing the cert-manager") + Expect(utils.InstallCertManager()).To(Succeed()) + + By("creating manager namespace") + cmd := exec.Command("kubectl", "create", "ns", namespace) + _, _ = utils.Run(cmd) + }) + + AfterAll(func() { + By("uninstalling the Prometheus manager bundle") + utils.UninstallPrometheusOperator() + + By("uninstalling the cert-manager bundle") + utils.UninstallCertManager() + + By("removing manager namespace") + cmd := exec.Command("kubectl", "delete", "ns", namespace) + _, _ = utils.Run(cmd) + }) + + Context("Operator", func() { + It("should run successfully", func() { + var controllerPodName string + var err error + + // projectimage stores the name of the image used in the example + var projectimage = "example.com/jumpstarter-router:v0.0.1" + + By("building the manager(Operator) image") + cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("loading the the manager(Operator) image on Kind") + err = 
utils.LoadImageToKindClusterWithName(projectimage) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("installing CRDs") + cmd = exec.Command("make", "install") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("deploying the controller-manager") + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func() error { + // Get pod name + + cmd = exec.Command("kubectl", "get", + "pods", "-l", "control-plane=controller-manager", + "-o", "go-template={{ range .items }}"+ + "{{ if not .metadata.deletionTimestamp }}"+ + "{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", namespace, + ) + + podOutput, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + podNames := utils.GetNonEmptyLines(string(podOutput)) + if len(podNames) != 1 { + return fmt.Errorf("expect 1 controller pods running, but got %d", len(podNames)) + } + controllerPodName = podNames[0] + ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager")) + + // Validate pod status + cmd = exec.Command("kubectl", "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", namespace, + ) + status, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + if string(status) != "Running" { + return fmt.Errorf("controller pod in %s status", status) + } + return nil + } + EventuallyWithOffset(1, verifyControllerUp, time.Minute, time.Second).Should(Succeed()) + + }) + }) +}) diff --git a/controller/test/utils/utils.go b/controller/test/utils/utils.go new file mode 100644 index 000000000..0deb50a31 --- /dev/null +++ b/controller/test/utils/utils.go @@ -0,0 +1,140 @@ +/* +Copyright 2024. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + "os" + "os/exec" + "strings" + + . "github.com/onsi/ginkgo/v2" //nolint:staticcheck +) + +const ( + prometheusOperatorVersion = "v0.72.0" + prometheusOperatorURL = "https://github.com/prometheus-operator/prometheus-operator/" + + "releases/download/%s/bundle.yaml" + + certmanagerVersion = "v1.14.4" + certmanagerURLTmpl = "https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml" +) + +func warnError(err error) { + _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) +} + +// InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics. 
+func InstallPrometheusOperator() error { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "create", "-f", url) + _, err := Run(cmd) + return err +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) ([]byte, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %s\n", command) + output, err := cmd.CombinedOutput() + if err != nil { + return output, fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output)) + } + + return output, nil +} + +// UninstallPrometheusOperator uninstalls the prometheus +func UninstallPrometheusOperator() { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// UninstallCertManager uninstalls the cert manager +func UninstallCertManager() { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// InstallCertManager installs the cert manager bundle. +func InstallCertManager() error { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "apply", "-f", url) + if _, err := Run(cmd); err != nil { + return err + } + // Wait for cert-manager-webhook to be ready, which can take time if cert-manager + // was re-installed after uninstalling on a cluster. 
+ cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", + "--for", "condition=Available", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + + _, err := Run(cmd) + return err +} + +// LoadImageToKindCluster loads a local docker image to the kind cluster +func LoadImageToKindClusterWithName(name string) error { + cluster := "kind" + if v, ok := os.LookupEnv("KIND_CLUSTER"); ok { + cluster = v + } + kindOptions := []string{"load", "docker-image", name, "--name", cluster} + cmd := exec.Command("kind", kindOptions...) + _, err := Run(cmd) + return err +} + +// GetNonEmptyLines converts given command output string into individual objects +// according to line breakers, and ignores the empty elements in it. +func GetNonEmptyLines(output string) []string { + var res []string + elements := strings.Split(output, "\n") + for _, element := range elements { + if element != "" { + res = append(res, element) + } + } + + return res +} + +// GetProjectDir will return the directory where the project is +func GetProjectDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return wd, err + } + wd = strings.ReplaceAll(wd, "/test/e2e", "") + return wd, nil +} diff --git a/e2e/LICENSE b/e2e/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/e2e/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/e2e/ca-config.json b/e2e/ca-config.json new file mode 100644 index 000000000..e3e997a9e --- /dev/null +++ b/e2e/ca-config.json @@ -0,0 +1,26 @@ +{ + "signing": { + "default": { + "expiry": "168h" + }, + "profiles": { + "www": { + "expiry": "8760h", + "usages": [ + "signing", + "key encipherment", + "server auth" + ] + }, + "client": { + "expiry": "8760h", + "usages": [ + "signing", + "key encipherment", + "client auth" + ] + } + } + } +} + diff --git a/e2e/ca-csr.json b/e2e/ca-csr.json new file mode 100644 index 000000000..b927a1dc9 --- /dev/null +++ b/e2e/ca-csr.json @@ -0,0 +1,19 @@ +{ + "CN": "example.net", + "hosts": [ + "example.net", + "www.example.net" + ], + "key": { + "algo": "ecdsa", + "size": 256 + }, + "names": [ + { + "C": "US", + "ST": "CA", + "L": "San Francisco" + } + ] +} + diff --git a/e2e/dex-csr.json b/e2e/dex-csr.json new file mode 100644 index 000000000..860c7b26a --- /dev/null +++ b/e2e/dex-csr.json @@ -0,0 +1,18 @@ +{ + "CN": "dex.dex.svc.cluster.local", + "hosts": [ + "dex.dex.svc.cluster.local" + ], + "key": { + "algo": "ecdsa", + "size": 256 + }, + "names": [ + { + "C": "US", + "ST": "CA", + "L": "San Francisco" + } + ] +} + diff --git a/e2e/dex.values.yaml b/e2e/dex.values.yaml new file mode 100644 index 000000000..9cb28681c --- /dev/null +++ b/e2e/dex.values.yaml @@ -0,0 +1,59 @@ +https: + enabled: true +config: + issuer: 
https://dex.dex.svc.cluster.local:5556 + web: + tlsCert: /etc/dex/tls/tls.crt + tlsKey: /etc/dex/tls/tls.key + storage: + type: kubernetes + config: + inCluster: true + staticClients: + - id: jumpstarter-cli + name: Jumpstarter CLI + public: true + oauth2: + responseTypes: ["code", "token", "id_token", "id_token token"] + passwordConnector: local + enablePasswordDB: true + staticPasswords: + - email: "test-client-oidc@example.com" + hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W" # password + username: "test-client-oidc" + userID: "73bca0b9-9be6-4e73-a8fb-347c2ac23255" + - email: "test-client-oidc-provisioning@example.com" + hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W" # password + username: "test-client-oidc-provisioning" + userID: "464d9494-5cc3-44e1-a380-c0403bd31fcb" + - email: "test-exporter-oidc@example.com" + hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W" # password + username: "test-exporter-oidc" + userID: "a4cb4de2-4467-4e5c-a42a-33be8783649d" + connectors: + - name: kubernetes + type: oidc + id: kubernetes + config: + # kubectl get --raw /.well-known/openid-configuration | jq -r '.issuer' + issuer: "https://kubernetes.default.svc.cluster.local" + rootCAs: + - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + userNameKey: sub + scopes: + - profile +volumes: + - name: tls + secret: + secretName: dex-tls +volumeMounts: + - name: tls + mountPath: /etc/dex/tls +service: + type: NodePort + ports: + http: + port: 5554 + https: + port: 5556 + nodePort: 32000 diff --git a/e2e/exporter.yaml b/e2e/exporter.yaml new file mode 100644 index 000000000..32254eafb --- /dev/null +++ b/e2e/exporter.yaml @@ -0,0 +1,5 @@ +export: + power: + type: jumpstarter_driver_power.driver.MockPower + storage: + type: jumpstarter_driver_opendal.driver.MockStorageMux diff --git a/e2e/kind_cluster.yaml b/e2e/kind_cluster.yaml new file mode 100644 index 000000000..419386c48 --- /dev/null +++ b/e2e/kind_cluster.yaml @@ 
-0,0 +1,37 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + "service-node-port-range": "3000-32767" +- | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" +nodes: +- role: control-plane + extraPortMappings: + - containerPort: 80 # ingress controller + hostPort: 5080 + protocol: TCP + - containerPort: 30010 # grpc nodeport + hostPort: 8082 + protocol: TCP + - containerPort: 30011 # grpc router nodeport + hostPort: 8083 + protocol: TCP + - containerPort: 32000 # dex nodeport + hostPort: 5556 + protocol: TCP + + - containerPort: 443 + hostPort: 5443 + protocol: TCP +# if we needed to mount a hostPath volume into the kind cluster, we can do it like this +# extraMounts: +# - hostPath: ./bin/e2e-certs +# containerPath: /tmp/e2e-certs + diff --git a/e2e/run-e2e.sh b/e2e/run-e2e.sh new file mode 100755 index 000000000..203fad286 --- /dev/null +++ b/e2e/run-e2e.sh @@ -0,0 +1,170 @@ +#!/usr/bin/env bash +# Jumpstarter End-to-End Test Runner +# This script runs the e2e test suite (assumes setup-e2e.sh was run first) + +set -euo pipefail + +# Get the directory where this script is located +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Get the monorepo root (parent of e2e directory) +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Color output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${GREEN}[INFO]${NC} $*" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $*" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $*" +} + +# Check if running in CI +is_ci() { + [ -n "${CI:-}" ] || [ -n "${GITHUB_ACTIONS:-}" ] +} + +# Check if setup was completed +check_setup() { + if [ ! -f "$REPO_ROOT/.e2e-setup-complete" ]; then + log_error "Setup not complete! 
Please run setup-e2e.sh first:" + log_error " bash e2e/setup-e2e.sh" + log_error "" + log_error "Or in CI mode, run the full setup automatically" + return 1 + fi + + # Load setup configuration + source "$REPO_ROOT/.e2e-setup-complete" + + # Export SSL certificate paths for Python + export SSL_CERT_FILE + export REQUESTS_CA_BUNDLE + + # Verify critical components are still running + if ! kubectl get namespace "$JS_NAMESPACE" &> /dev/null; then + log_error "Namespace $JS_NAMESPACE not found. Please run setup-e2e.sh again." + return 1 + fi + + log_info "✓ Setup verified" + return 0 +} + +# Setup environment for bats +setup_bats_env() { + # Always set BATS_LIB_PATH to include local libraries + local LOCAL_BATS_LIB="$REPO_ROOT/.bats/lib" + + if [ -d "$LOCAL_BATS_LIB" ]; then + export BATS_LIB_PATH="$LOCAL_BATS_LIB:${BATS_LIB_PATH:-}" + log_info "Set BATS_LIB_PATH to local libraries: $BATS_LIB_PATH" + else + log_warn "Local bats libraries not found at $LOCAL_BATS_LIB" + log_warn "You may need to run setup-e2e.sh first" + fi +} + +# Run the tests +run_tests() { + log_info "Running jumpstarter e2e tests..." + + cd "$REPO_ROOT" + + # Activate virtual environment + if [ -f .venv/bin/activate ]; then + source .venv/bin/activate + else + log_error "Virtual environment not found. Please run setup-e2e.sh first." + exit 1 + fi + + # Use insecure GRPC for testing + export JUMPSTARTER_GRPC_INSECURE=1 + + # Export variables for bats + export JS_NAMESPACE="${JS_NAMESPACE}" + export ENDPOINT="${ENDPOINT}" + + # Setup bats environment + setup_bats_env + + # Run bats tests + log_info "Running bats tests..." + bats --show-output-of-passing-tests --verbose-run "$SCRIPT_DIR"/tests.bats +} + +# Full setup and run (for CI or first-time use) +full_run() { + log_info "Running full setup + test cycle..." + + if [ -f "$SCRIPT_DIR/setup-e2e.sh" ]; then + bash "$SCRIPT_DIR/setup-e2e.sh" + else + log_error "setup-e2e.sh not found!" 
+ exit 1 + fi + + # After setup, load the configuration + if [ -f "$REPO_ROOT/.e2e-setup-complete" ]; then + source "$REPO_ROOT/.e2e-setup-complete" + # Export SSL certificate paths for Python + export SSL_CERT_FILE + export REQUESTS_CA_BUNDLE + fi + + run_tests +} + +# Main execution +main() { + # Default namespace + export JS_NAMESPACE="${JS_NAMESPACE:-jumpstarter-lab}" + + log_info "=== Jumpstarter E2E Test Runner ===" + log_info "Namespace: $JS_NAMESPACE" + log_info "Repository Root: $REPO_ROOT" + echo "" + + # If --full flag is passed, always run full setup + if [[ "${1:-}" == "--full" ]]; then + full_run + # In CI mode, check if setup was already done + elif is_ci; then + if check_setup 2>/dev/null; then + log_info "Setup already complete, skipping setup and running tests..." + run_tests + else + log_info "Setup not found in CI, running full setup..." + full_run + fi + else + # Local development: require setup to be done first + if check_setup; then + run_tests + else + log_error "" + log_error "Setup is required before running tests." + log_error "" + log_error "Options:" + log_error " 1. Run setup first: bash e2e/setup-e2e.sh" + log_error " 2. Run full cycle: bash e2e/run-e2e.sh --full" + exit 1 + fi + fi + + echo "" + log_info "✓✓✓ All e2e tests completed successfully! ✓✓✓" +} + +# Run main function +main "$@" diff --git a/e2e/setup-e2e.sh b/e2e/setup-e2e.sh new file mode 100755 index 000000000..96a4c7e69 --- /dev/null +++ b/e2e/setup-e2e.sh @@ -0,0 +1,364 @@ +#!/usr/bin/env bash +# Jumpstarter End-to-End Testing Setup Script +# This script performs one-time setup for e2e testing + +set -euo pipefail + +# Get the directory where this script is located +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Get the monorepo root (parent of e2e directory) +REPO_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" + +# Default namespace for tests +export JS_NAMESPACE="${JS_NAMESPACE:-jumpstarter-lab}" + +# Color output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${GREEN}[INFO]${NC} $*" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $*" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $*" +} + +# Check if running in CI +is_ci() { + [ -n "${CI:-}" ] || [ -n "${GITHUB_ACTIONS:-}" ] +} + +# Check if bats libraries are available +check_bats_libraries() { + if ! command -v bats &> /dev/null; then + return 1 + fi + + # Try to load the libraries + if ! bats --version &> /dev/null; then + return 1 + fi + + # Check if libraries can be loaded by testing with a simple script + local test_file=$(mktemp) + cat > "$test_file" <<'EOF' +setup() { + bats_load_library bats-support + bats_load_library bats-assert +} + +@test "dummy" { + run echo "test" + assert_success +} +EOF + + # Run test with current BATS_LIB_PATH + if bats "$test_file" &> /dev/null; then + rm -f "$test_file" + return 0 + else + rm -f "$test_file" + return 1 + fi +} + +# Install bats libraries locally (works on all systems) +install_bats_libraries_local() { + local LIB_DIR="$REPO_ROOT/.bats/lib" + local ORIGINAL_DIR="$PWD" + + log_info "Installing bats helper libraries to $LIB_DIR..." + + mkdir -p "$LIB_DIR" + cd "$LIB_DIR" + + # Install bats-support + if [ ! -d "bats-support" ]; then + log_info "Cloning bats-support..." + git clone --depth 1 https://github.com/bats-core/bats-support.git + else + log_info "bats-support already installed" + fi + + # Install bats-assert + if [ ! -d "bats-assert" ]; then + log_info "Cloning bats-assert..." + git clone --depth 1 https://github.com/bats-core/bats-assert.git + else + log_info "bats-assert already installed" + fi + + # Install bats-file + if [ ! -d "bats-file" ]; then + log_info "Cloning bats-file..." 
+ git clone --depth 1 https://github.com/bats-core/bats-file.git + else + log_info "bats-file already installed" + fi + + cd "$ORIGINAL_DIR" + + # Set BATS_LIB_PATH + export BATS_LIB_PATH="$LIB_DIR:${BATS_LIB_PATH:-}" + + log_info "✓ Bats libraries installed successfully" + log_info "BATS_LIB_PATH set to: $BATS_LIB_PATH" + + # Verify installation worked + if check_bats_libraries; then + log_info "✓ Libraries verified and working" + else + log_error "Libraries installed but verification failed" + log_error "Please check that the following directories exist:" + log_error " $LIB_DIR/bats-support" + log_error " $LIB_DIR/bats-assert" + exit 1 + fi +} + +# Step 1: Install dependencies +install_dependencies() { + log_info "Installing dependencies..." + + # Install uv if not already installed + if ! command -v uv &> /dev/null; then + log_info "Installing uv..." + curl -LsSf https://astral.sh/uv/install.sh | sh + export PATH="$HOME/.cargo/bin:$PATH" + fi + + # Install Python 3.12 + log_info "Installing Python 3.12..." + uv python install 3.12 + + # Install bats if not already installed + if ! command -v bats &> /dev/null; then + log_info "Installing bats..." + if is_ci; then + sudo apt-get update + sudo apt-get install -y bats + elif [[ "$OSTYPE" == "darwin"* ]]; then + log_info "Installing bats-core via Homebrew..." + brew install bats-core + else + log_error "bats not found. Please install it manually:" + log_error " Ubuntu/Debian: sudo apt-get install bats" + log_error " Fedora/RHEL: sudo dnf install bats" + log_error " macOS: brew install bats-core" + exit 1 + fi + fi + + # Always install bats libraries locally for consistency across all systems + # This ensures libraries work regardless of package manager or distribution + if ! check_bats_libraries; then + log_info "Installing bats libraries locally..." 
+ install_bats_libraries_local + else + log_info "✓ Bats libraries are already available" + # Still set BATS_LIB_PATH to include local directory for consistency + export BATS_LIB_PATH="$REPO_ROOT/.bats/lib:${BATS_LIB_PATH:-}" + fi + + log_info "✓ Dependencies installed" +} + +# Step 2: Deploy dex +deploy_dex() { + log_info "Deploying dex..." + + cd "$REPO_ROOT" + + # Generate certificates + log_info "Generating certificates..." + go run github.com/cloudflare/cfssl/cmd/cfssl@latest gencert -initca "$SCRIPT_DIR"/ca-csr.json | \ + go run github.com/cloudflare/cfssl/cmd/cfssljson@latest -bare ca - + go run github.com/cloudflare/cfssl/cmd/cfssl@latest gencert -ca=ca.pem -ca-key=ca-key.pem \ + -config="$SCRIPT_DIR"/ca-config.json -profile=www "$SCRIPT_DIR"/dex-csr.json | \ + go run github.com/cloudflare/cfssl/cmd/cfssljson@latest -bare server + + + make -C controller cluster + + # Create dex namespace and TLS secret + log_info "Creating dex namespace and secrets..." + kubectl create namespace dex + kubectl -n dex create secret tls dex-tls \ + --cert=server.pem \ + --key=server-key.pem + + # Create .e2e directory for configuration files + log_info "Creating .e2e directory for local configuration..." + mkdir -p "$REPO_ROOT/.e2e" + + # Copy values.kind.yaml to .e2e and inject the CA certificate + log_info "Creating values file with CA certificate..." + cp "$SCRIPT_DIR"/values.kind.yaml "$REPO_ROOT/.e2e/values.kind.yaml" + + log_info "Injecting CA certificate into values..." + go run github.com/mikefarah/yq/v4@latest -i \ + '.jumpstarter-controller.config.authentication.jwt[0].issuer.certificateAuthority = load_str("ca.pem")' \ + "$REPO_ROOT/.e2e/values.kind.yaml" + + log_info "✓ Values file with CA certificate created at .e2e/values.kind.yaml" + + # Create OIDC reviewer binding (important!) + log_info "Creating OIDC reviewer cluster role binding..." 
+ kubectl create clusterrolebinding oidc-reviewer \ + --clusterrole=system:service-account-issuer-discovery \ + --group=system:unauthenticated + + # Install dex via helm + log_info "Installing dex via helm..." + helm repo add dex https://charts.dexidp.io + helm install --namespace dex --wait -f "$SCRIPT_DIR"/dex.values.yaml dex dex/dex + + # Install CA certificate + log_info "Installing CA certificate..." + if [[ "$OSTYPE" == "darwin"* ]]; then + # this may be unnecessary, but keeping it here for now + #log_warn "About to add the CA certificate to your macOS login keychain" + #security add-trusted-cert -d -r trustRoot -k ~/Library/Keychains/login.keychain-db ca.pem + #log_info "✓ CA certificate added to macOS login keychain" + true + else + log_warn "About to install the CA certificate system-wide (requires sudo)" + # Detect if this is a RHEL/Fedora system or Debian/Ubuntu system + if [ -d "/etc/pki/ca-trust/source/anchors" ]; then + # RHEL/Fedora/CentOS + sudo cp ca.pem /etc/pki/ca-trust/source/anchors/dex.crt + sudo update-ca-trust + log_info "✓ CA certificate installed system-wide (RHEL/Fedora)" + else + # Debian/Ubuntu + sudo cp ca.pem /usr/local/share/ca-certificates/dex.crt + sudo update-ca-certificates + log_info "✓ CA certificate installed system-wide (Debian/Ubuntu)" + fi + fi + + # Add dex to /etc/hosts if not already present + log_info "Checking /etc/hosts for dex entry..." + if ! grep -q "dex.dex.svc.cluster.local" /etc/hosts 2>/dev/null; then + log_warn "About to add 'dex.dex.svc.cluster.local' to /etc/hosts (requires sudo)" + echo "127.0.0.1 dex.dex.svc.cluster.local" | sudo tee -a /etc/hosts + log_info "✓ Added dex to /etc/hosts" + else + log_info "✓ dex.dex.svc.cluster.local already in /etc/hosts" + fi + + log_info "✓ Dex deployed" +} + +# Step 3: Deploy jumpstarter controller +deploy_controller() { + log_info "Deploying jumpstarter controller..." 
+ + cd "$REPO_ROOT" + + # Deploy with modified values using EXTRA_VALUES environment variable + log_info "Deploying controller with CA certificate..." + EXTRA_VALUES="--values $REPO_ROOT/.e2e/values.kind.yaml" make -C controller deploy + + log_info "✓ Controller deployed" +} + +# Step 4: Install jumpstarter +install_jumpstarter() { + log_info "Installing jumpstarter..." + + cd "$REPO_ROOT" + + # Create virtual environment + uv venv + + # Install jumpstarter packages + uv pip install \ + ./python/packages/jumpstarter-cli \ + ./python/packages/jumpstarter-driver-composite \ + ./python/packages/jumpstarter-driver-power \ + ./python/packages/jumpstarter-driver-opendal + + log_info "✓ Jumpstarter installed" +} + +# Step 5: Setup test environment +setup_test_environment() { + log_info "Setting up test environment..." + + cd "$REPO_ROOT" + + # Get the controller endpoint + export ENDPOINT=$(helm get values jumpstarter --output json | jq -r '."jumpstarter-controller".grpc.endpoint') + log_info "Controller endpoint: $ENDPOINT" + + # Setup exporters directory + echo "Setting up exporters directory in /etc/jumpstarter/exporters..., will need permissions" + sudo mkdir -p /etc/jumpstarter/exporters + sudo chown "$USER" /etc/jumpstarter/exporters + + # Create service accounts + log_info "Creating service accounts..." 
+ kubectl create -n "${JS_NAMESPACE}" sa test-client-sa + kubectl create -n "${JS_NAMESPACE}" sa test-exporter-sa + + # Create a marker file to indicate setup is complete + echo "ENDPOINT=$ENDPOINT" > "$REPO_ROOT/.e2e-setup-complete" + echo "JS_NAMESPACE=$JS_NAMESPACE" >> "$REPO_ROOT/.e2e-setup-complete" + echo "REPO_ROOT=$REPO_ROOT" >> "$REPO_ROOT/.e2e-setup-complete" + echo "SCRIPT_DIR=$SCRIPT_DIR" >> "$REPO_ROOT/.e2e-setup-complete" + + # Set SSL certificate paths for Python to use the generated CA + echo "SSL_CERT_FILE=$REPO_ROOT/ca.pem" >> "$REPO_ROOT/.e2e-setup-complete" + echo "REQUESTS_CA_BUNDLE=$REPO_ROOT/ca.pem" >> "$REPO_ROOT/.e2e-setup-complete" + + # Save BATS_LIB_PATH for test runs + echo "BATS_LIB_PATH=$BATS_LIB_PATH" >> "$REPO_ROOT/.e2e-setup-complete" + + log_info "✓ Test environment ready" +} + +# Main execution +main() { + log_info "=== Jumpstarter E2E Setup ===" + log_info "Namespace: $JS_NAMESPACE" + log_info "Repository Root: $REPO_ROOT" + log_info "Script Directory: $SCRIPT_DIR" + echo "" + + install_dependencies + echo "" + + deploy_dex + echo "" + + deploy_controller + echo "" + + install_jumpstarter + echo "" + + setup_test_environment + echo "" + + log_info "✓✓✓ Setup complete! 
✓✓✓" + log_info "" + log_info "To run tests:" + log_info " cd $REPO_ROOT" + log_info " bash e2e/run-e2e.sh" + log_info "" + log_info "Or use the Makefile:" + log_info " make e2e" +} + +# Run main function +main "$@" diff --git a/e2e/tests.bats b/e2e/tests.bats new file mode 100644 index 000000000..703e15fc4 --- /dev/null +++ b/e2e/tests.bats @@ -0,0 +1,228 @@ +JS_NAMESPACE="${JS_NAMESPACE:-jumpstarter-lab}" + +# File to track bash wrapper process PIDs across tests +EXPORTER_PIDS_FILE="${BATS_RUN_TMPDIR:-/tmp}/exporter_pids.txt" + +setup_file() { + # Initialize the PIDs file at the start of all tests + echo "" > "$EXPORTER_PIDS_FILE" +} + +setup() { + bats_load_library bats-support + bats_load_library bats-assert + + bats_require_minimum_version 1.5.0 +} + +# teardown_file runs once after all tests complete (requires bats-core 1.5.0+) +teardown_file() { + echo "" >&2 + echo "========================================" >&2 + echo "TEARDOWN_FILE RUNNING" >&2 + echo "========================================" >&2 + echo "=== Cleaning up exporter bash processes ===" >&2 + + # Read PIDs from file + if [ -f "$EXPORTER_PIDS_FILE" ]; then + local pids=$(cat "$EXPORTER_PIDS_FILE" | tr '\n' ' ') + echo "Tracked PIDs from file: $pids" >&2 + + while IFS= read -r pid; do + if [ -n "$pid" ]; then + echo "Checking PID $pid..." >&2 + if ps -p "$pid" > /dev/null 2>&1; then + echo " Killing PID $pid" >&2 + kill -9 "$pid" 2>/dev/null || true + else + echo " PID $pid already terminated" >&2 + fi + fi + done < "$EXPORTER_PIDS_FILE" + else + echo "No PIDs file found at $EXPORTER_PIDS_FILE" >&2 + fi + + echo "Checking for orphaned jmp processes..." 
>&2 + local orphans=$(pgrep -f "jmp run --exporter" 2>/dev/null | wc -l) + echo "Found $orphans orphaned jmp processes" >&2 + + # remove orphaned processes + pkill -9 -f "jmp run --exporter" 2>/dev/null || true + + # Clean up the PIDs file + rm -f "$EXPORTER_PIDS_FILE" + + echo "=== Cleanup complete ===" >&2 +} + +wait_for_exporter() { + # After a lease operation the exporter is disconnecting from controller and reconnecting. + # The disconnect can take a short while so let's avoid catching the pre-disconnect state and early return + sleep 2 + kubectl -n "${JS_NAMESPACE}" wait --timeout 20m --for=condition=Online --for=condition=Registered \ + exporters.jumpstarter.dev/test-exporter-oidc + kubectl -n "${JS_NAMESPACE}" wait --timeout 20m --for=condition=Online --for=condition=Registered \ + exporters.jumpstarter.dev/test-exporter-sa + kubectl -n "${JS_NAMESPACE}" wait --timeout 20m --for=condition=Online --for=condition=Registered \ + exporters.jumpstarter.dev/test-exporter-legacy +} + +@test "can create clients with admin cli" { + jmp admin create client -n "${JS_NAMESPACE}" test-client-oidc --unsafe --out /dev/null \ + --oidc-username dex:test-client-oidc + jmp admin create client -n "${JS_NAMESPACE}" test-client-sa --unsafe --out /dev/null \ + --oidc-username dex:system:serviceaccount:"${JS_NAMESPACE}":test-client-sa + jmp admin create client -n "${JS_NAMESPACE}" test-client-legacy --unsafe --save +} + +@test "can create exporters with admin cli" { + jmp admin create exporter -n "${JS_NAMESPACE}" test-exporter-oidc --out /dev/null \ + --oidc-username dex:test-exporter-oidc \ + --label example.com/board=oidc + jmp admin create exporter -n "${JS_NAMESPACE}" test-exporter-sa --out /dev/null \ + --oidc-username dex:system:serviceaccount:"${JS_NAMESPACE}":test-exporter-sa \ + --label example.com/board=sa + jmp admin create exporter -n "${JS_NAMESPACE}" test-exporter-legacy --save \ + --label example.com/board=legacy +} + +@test "can login with oidc" { + jmp config 
client list + jmp config exporter list + + jmp login --client test-client-oidc \ + --endpoint "$ENDPOINT" --namespace "${JS_NAMESPACE}" --name test-client-oidc \ + --issuer https://dex.dex.svc.cluster.local:5556 \ + --username test-client-oidc@example.com --password password --unsafe + + jmp login --client test-client-oidc-provisioning \ + --endpoint "$ENDPOINT" --namespace "${JS_NAMESPACE}" --name "" \ + --issuer https://dex.dex.svc.cluster.local:5556 \ + --username test-client-oidc-provisioning@example.com --password password --unsafe + + jmp login --client test-client-sa \ + --endpoint "$ENDPOINT" --namespace "${JS_NAMESPACE}" --name test-client-sa \ + --issuer https://dex.dex.svc.cluster.local:5556 \ + --connector-id kubernetes \ + --token $(kubectl create -n "${JS_NAMESPACE}" token test-client-sa) --unsafe + + jmp login --exporter test-exporter-oidc \ + --endpoint "$ENDPOINT" --namespace "${JS_NAMESPACE}" --name test-exporter-oidc \ + --issuer https://dex.dex.svc.cluster.local:5556 \ + --username test-exporter-oidc@example.com --password password + + jmp login --exporter test-exporter-sa \ + --endpoint "$ENDPOINT" --namespace "${JS_NAMESPACE}" --name test-exporter-sa \ + --issuer https://dex.dex.svc.cluster.local:5556 \ + --connector-id kubernetes \ + --token $(kubectl create -n "${JS_NAMESPACE}" token test-exporter-sa) + + go run github.com/mikefarah/yq/v4@latest -i ". * load(\"e2e/exporter.yaml\")" \ + /etc/jumpstarter/exporters/test-exporter-oidc.yaml + go run github.com/mikefarah/yq/v4@latest -i ". * load(\"e2e/exporter.yaml\")" \ + /etc/jumpstarter/exporters/test-exporter-sa.yaml + go run github.com/mikefarah/yq/v4@latest -i ". * load(\"e2e/exporter.yaml\")" \ + /etc/jumpstarter/exporters/test-exporter-legacy.yaml + + jmp config client list + jmp config exporter list +} + +@test "can run exporters" { + cat <&- & +while true; do + jmp run --exporter test-exporter-oidc +done +EOF + echo "$!" 
>> "$EXPORTER_PIDS_FILE" + + cat <&- & +while true; do + jmp run --exporter test-exporter-sa +done +EOF + echo "$!" >> "$EXPORTER_PIDS_FILE" + + cat <&- & +while true; do + jmp run --exporter test-exporter-legacy +done +EOF + echo "$!" >> "$EXPORTER_PIDS_FILE" + + wait_for_exporter +} + +@test "can specify client config only using environment variables" { + wait_for_exporter + + # we feed the namespace into JMP_NAMESPACE along with all the other client details + # to verify that the client can operate without a config file + JMP_NAMESPACE="${JS_NAMESPACE}" \ + JMP_DRIVERS_ALLOW="*" \ + JMP_NAME=test-client-legacy \ + JMP_ENDPOINT=$(kubectl get clients.jumpstarter.dev -n "${JS_NAMESPACE}" test-client-legacy -o 'jsonpath={.status.endpoint}') \ + JMP_TOKEN=$(kubectl get secrets -n "${JS_NAMESPACE}" test-client-legacy-client -o 'jsonpath={.data.token}' | base64 -d) \ + jmp shell --selector example.com/board=oidc j power on +} + +@test "can operate on leases" { + wait_for_exporter + + jmp config client use test-client-oidc + + jmp create lease --selector example.com/board=oidc --duration 1d + jmp get leases + jmp get exporters + jmp delete leases --all +} + +@test "can lease and connect to exporters" { + wait_for_exporter + + jmp shell --client test-client-oidc --selector example.com/board=oidc j power on + jmp shell --client test-client-sa --selector example.com/board=sa j power on + jmp shell --client test-client-legacy --selector example.com/board=legacy j power on + + wait_for_exporter + jmp shell --client test-client-oidc-provisioning --selector example.com/board=oidc j power on +} + +@test "can get crds with admin cli" { + jmp admin get client --namespace "${JS_NAMESPACE}" + jmp admin get exporter --namespace "${JS_NAMESPACE}" + jmp admin get lease --namespace "${JS_NAMESPACE}" +} + +@test "can delete clients with admin cli" { + kubectl -n "${JS_NAMESPACE}" get secret test-client-oidc-client + kubectl -n "${JS_NAMESPACE}" get 
clients.jumpstarter.dev/test-client-oidc + kubectl -n "${JS_NAMESPACE}" get clients.jumpstarter.dev/test-client-sa + kubectl -n "${JS_NAMESPACE}" get clients.jumpstarter.dev/test-client-legacy + + jmp admin delete client --namespace "${JS_NAMESPACE}" test-client-oidc --delete + jmp admin delete client --namespace "${JS_NAMESPACE}" test-client-sa --delete + jmp admin delete client --namespace "${JS_NAMESPACE}" test-client-legacy --delete + + run ! kubectl -n "${JS_NAMESPACE}" get secret test-client-oidc-client + run ! kubectl -n "${JS_NAMESPACE}" get clients.jumpstarter.dev/test-client-oidc + run ! kubectl -n "${JS_NAMESPACE}" get clients.jumpstarter.dev/test-client-sa + run ! kubectl -n "${JS_NAMESPACE}" get clients.jumpstarter.dev/test-client-legacy +} + +@test "can delete exporters with admin cli" { + kubectl -n "${JS_NAMESPACE}" get secret test-exporter-oidc-exporter + kubectl -n "${JS_NAMESPACE}" get exporters.jumpstarter.dev/test-exporter-oidc + kubectl -n "${JS_NAMESPACE}" get exporters.jumpstarter.dev/test-exporter-sa + kubectl -n "${JS_NAMESPACE}" get exporters.jumpstarter.dev/test-exporter-legacy + + jmp admin delete exporter --namespace "${JS_NAMESPACE}" test-exporter-oidc --delete + jmp admin delete exporter --namespace "${JS_NAMESPACE}" test-exporter-sa --delete + jmp admin delete exporter --namespace "${JS_NAMESPACE}" test-exporter-legacy --delete + + run ! kubectl -n "${JS_NAMESPACE}" get secret test-exporter-oidc-exporter + run ! kubectl -n "${JS_NAMESPACE}" get exporters.jumpstarter.dev/test-exporter-oidc + run ! kubectl -n "${JS_NAMESPACE}" get exporters.jumpstarter.dev/test-exporter-sa + run ! 
kubectl -n "${JS_NAMESPACE}" get exporters.jumpstarter.dev/test-exporter-legacy +} diff --git a/e2e/values.kind.yaml b/e2e/values.kind.yaml new file mode 100644 index 000000000..95c45ce99 --- /dev/null +++ b/e2e/values.kind.yaml @@ -0,0 +1,23 @@ +global: + baseDomain: jumpstarter.127.0.0.1.nip.io + metrics: + enabled: false + +jumpstarter-controller: + grpc: + mode: "ingress" + config: + provisioning: + enabled: true + authentication: + jwt: + - issuer: + url: https://dex.dex.svc.cluster.local:5556 + audiences: + - jumpstarter-cli + audienceMatchPolicy: MatchAny + certificateAuthority: placeholder + claimMappings: + username: + claim: "name" + prefix: "dex:" diff --git a/import_pr.sh b/import_pr.sh new file mode 100755 index 000000000..cc83c389d --- /dev/null +++ b/import_pr.sh @@ -0,0 +1,459 @@ +#!/bin/bash +# +# import_pr.sh +# +# Imports a PR from an upstream Jumpstarter repository into the monorepo. +# This script fetches PR commits, generates patches, and applies them with +# the correct directory prefix for the monorepo structure. 
+# +# Usage: ./import_pr.sh +# +# Arguments: +# repo - One of: python, protocol, controller, e2e +# pr_number - The PR number from the upstream repository +# +# Example: +# ./import_pr.sh python 123 +# ./import_pr.sh controller 45 +# + +set -e + +# Configuration +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +# TEMP_DIR will be set later with repo name and PR number +TEMP_DIR="" +PATCH_DIR="" + +# Repository mapping function (compatible with bash 3.2+) +get_repo_info() { + local repo_name="$1" + case "$repo_name" in + python) + echo "jumpstarter-dev/jumpstarter python" + ;; + protocol) + echo "jumpstarter-dev/jumpstarter-protocol protocol" + ;; + controller) + echo "jumpstarter-dev/jumpstarter-controller controller" + ;; + e2e) + echo "jumpstarter-dev/jumpstarter-e2e e2e" + ;; + *) + echo "" + ;; + esac +} + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging functions +log_info() { + echo -e "${GREEN}[INFO]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +log_step() { + echo -e "${BLUE}[STEP]${NC} $1" +} + +# Cleanup function - only clean up on success +cleanup() { + local exit_code=$? + if [ $exit_code -eq 0 ]; then + if [ -n "${TEMP_DIR}" ] && [ -d "${TEMP_DIR}" ]; then + log_info "Cleaning up temporary directory..." + rm -rf "${TEMP_DIR}" + fi + else + if [ -n "${TEMP_DIR}" ] && [ -d "${TEMP_DIR}" ]; then + echo "" + log_warn "Script failed. Temporary directory preserved for debugging:" + log_warn " ${TEMP_DIR}" + echo "" + log_warn "To clean up manually after debugging:" + log_warn " rm -rf ${TEMP_DIR}" + fi + fi +} + +trap cleanup EXIT + +# Print usage +usage() { + echo "Usage: $0 " + echo "" + echo "Import a PR from an upstream repository into the monorepo." 
+ echo "" + echo "Arguments:" + echo " repo - One of: python, protocol, controller, e2e" + echo " pr_number - The PR number from the upstream repository" + echo "" + echo "Examples:" + echo " $0 python 123 # Import PR #123 from jumpstarter repo" + echo " $0 controller 45 # Import PR #45 from controller repo" + echo "" + echo "Repository mappings:" + echo " python -> jumpstarter-dev/jumpstarter" + echo " protocol -> jumpstarter-dev/jumpstarter-protocol" + echo " controller -> jumpstarter-dev/jumpstarter-controller" + echo " e2e -> jumpstarter-dev/jumpstarter-e2e" + exit 1 +} + +# Check dependencies +check_dependencies() { + log_step "Checking dependencies..." + + if ! command -v git &> /dev/null; then + log_error "git is not installed. Please install git first." + exit 1 + fi + + if ! command -v gh &> /dev/null; then + log_error "gh (GitHub CLI) is not installed." + echo "Install it from: https://cli.github.com/" + exit 1 + fi + + # Check if gh is authenticated + if ! gh auth status &> /dev/null; then + log_error "gh is not authenticated. Please run 'gh auth login' first." + exit 1 + fi + + log_info "All dependencies found." +} + +# Validate arguments +validate_args() { + if [ $# -lt 2 ]; then + log_error "Missing arguments." + usage + fi + + local repo="$1" + local pr_number="$2" + + # Validate repo name + local repo_info + repo_info=$(get_repo_info "$repo") + if [ -z "$repo_info" ]; then + log_error "Invalid repository name: ${repo}" + echo "Valid options are: python, protocol, controller, e2e" + exit 1 + fi + + # Validate PR number is numeric + if ! [[ "$pr_number" =~ ^[0-9]+$ ]]; then + log_error "PR number must be a positive integer: ${pr_number}" + exit 1 + fi +} + +# Fetch PR information +fetch_pr_info() { + local github_repo="$1" + local pr_number="$2" + + log_step "Fetching PR #${pr_number} info from ${github_repo}..." 
+ + # Get PR details as JSON + local pr_json + pr_json=$(gh pr view "${pr_number}" --repo "${github_repo}" --json title,baseRefName,headRefName,commits,state 2>&1) || { + log_error "Failed to fetch PR #${pr_number} from ${github_repo}" + echo "Make sure the PR exists and you have access to the repository." + exit 1 + } + + # Extract fields + PR_TITLE=$(echo "$pr_json" | jq -r '.title') + PR_BASE_BRANCH=$(echo "$pr_json" | jq -r '.baseRefName') + PR_HEAD_BRANCH=$(echo "$pr_json" | jq -r '.headRefName') + PR_COMMIT_COUNT=$(echo "$pr_json" | jq '.commits | length') + PR_STATE=$(echo "$pr_json" | jq -r '.state') + + log_info "PR Title: ${PR_TITLE}" + log_info "Base Branch: ${PR_BASE_BRANCH}" + log_info "Head Branch: ${PR_HEAD_BRANCH}" + log_info "Commits: ${PR_COMMIT_COUNT}" + log_info "State: ${PR_STATE}" +} + +# Clone repository and checkout PR +clone_and_checkout_pr() { + local github_repo="$1" + local pr_number="$2" + + log_step "Cloning repository and checking out PR..." + + # Create temp directory + mkdir -p "${TEMP_DIR}" + mkdir -p "${PATCH_DIR}" + + local clone_dir="${TEMP_DIR}/repo" + + # Clone the repository (full clone needed for patch generation) + log_info "Cloning ${github_repo}..." + gh repo clone "${github_repo}" "${clone_dir}" + + cd "${clone_dir}" + + # Checkout the PR + log_info "Checking out PR #${pr_number}..." + gh pr checkout "${pr_number}" --repo "${github_repo}" + + # Ensure we have the full history of both branches for finding merge base + log_info "Fetching base branch with full history..." + git fetch --unshallow origin "${PR_BASE_BRANCH}" 2>/dev/null || git fetch origin "${PR_BASE_BRANCH}" + + CLONE_DIR="${clone_dir}" +} + +# Generate patches for PR commits +generate_patches() { + log_step "Generating patches..." + + cd "${CLONE_DIR}" || { + log_error "Failed to cd to ${CLONE_DIR}" + exit 1 + } + + # Find the merge base between the PR branch and the base branch + local merge_base + if ! 
merge_base=$(git merge-base "origin/${PR_BASE_BRANCH}" HEAD 2>&1); then + log_error "Failed to find merge base: ${merge_base}" + exit 1 + fi + + log_info "Merge base: ${merge_base}" + + # Count all commits (including merges) + local total_commits + if ! total_commits=$(git rev-list --count "${merge_base}..HEAD" 2>&1); then + log_error "Failed to count commits: ${total_commits}" + exit 1 + fi + + # Count non-merge commits + local non_merge_commits + if ! non_merge_commits=$(git rev-list --count --no-merges "${merge_base}..HEAD" 2>&1); then + log_error "Failed to count non-merge commits: ${non_merge_commits}" + exit 1 + fi + + log_info "Total commits: ${total_commits} (${non_merge_commits} non-merge)" + + if [ "$non_merge_commits" -eq 0 ]; then + log_error "No non-merge commits found between merge base and HEAD." + exit 1 + fi + + # Check if there are merge commits + local merge_commits=$((total_commits - non_merge_commits)) + if [ "$merge_commits" -gt 0 ]; then + log_warn "PR contains ${merge_commits} merge commit(s) which will be skipped." + log_warn "Only the ${non_merge_commits} non-merge commits will be imported." + fi + + # Generate patches (skip merge commits) + log_info "Generating patches for non-merge commits..." + if ! git format-patch --no-merges -o "${PATCH_DIR}" "${merge_base}..HEAD"; then + log_error "Failed to generate patches." + exit 1 + fi + + # Count generated patches + PATCH_COUNT=$(find "${PATCH_DIR}" -name "*.patch" 2>/dev/null | wc -l | tr -d ' ') + + if [ "$PATCH_COUNT" -eq 0 ]; then + log_error "No patches were generated." + exit 1 + fi + + log_info "Generated ${PATCH_COUNT} patch file(s)." +} + +# Apply patches to monorepo +apply_patches() { + local subdir="$1" + local repo_name="$2" + local pr_number="$3" + + log_step "Applying patches to monorepo..." + + cd "${SCRIPT_DIR}" + + # Create branch name + local branch_name="import/${repo_name}-pr-${pr_number}" + + # Check if we're in a git repository + if ! 
git rev-parse --git-dir &> /dev/null; then
        log_error "Not in a git repository. Please run this script from the monorepo root."
        exit 1
    fi

    # Check for uncommitted changes
    if ! git diff --quiet || ! git diff --cached --quiet; then
        log_error "You have uncommitted changes. Please commit or stash them first."
        exit 1
    fi

    # Check if branch already exists
    if git show-ref --verify --quiet "refs/heads/${branch_name}"; then
        log_error "Branch '${branch_name}' already exists."
        echo "Delete it with: git branch -D ${branch_name}"
        exit 1
    fi

    # Create and checkout new branch
    log_info "Creating branch: ${branch_name}"
    git checkout -b "${branch_name}"

    # Apply patches with directory prefix
    log_info "Applying patches with directory prefix: ${subdir}/"

    local patch_files=("${PATCH_DIR}"/*.patch)
    local applied=0
    local failed=0

    for patch in "${patch_files[@]}"; do
        if [ -f "$patch" ]; then
            local patch_name
            patch_name=$(basename "$patch")
            if git am --directory="${subdir}" "$patch" 2>/dev/null; then
                log_info "Applied: ${patch_name}"
                # NOTE: must not use ((applied++)) here: under `set -e` the
                # arithmetic command's exit status is 1 when the expression
                # evaluates to 0 (post-increment of 0), which would abort the
                # whole script after the first patch is applied.
                applied=$((applied + 1))
            else
                log_error "Failed to apply: ${patch_name}"
                # Same set -e hazard as above: ((failed++)) would exit the
                # script before the diagnostic output below ever prints.
                failed=$((failed + 1))
                # Abort the am session
                git am --abort 2>/dev/null || true
                break
            fi
        fi
    done

    if [ "$failed" -gt 0 ]; then
        log_error "Failed to apply ${failed} patch(es)."
        echo ""
        echo "Debug information:"
        echo "  - Patches directory: ${PATCH_DIR}"
        echo "  - Upstream repo clone: ${TEMP_DIR}/repo"
        echo "  - Current branch: ${branch_name}"
        echo ""
        echo "To investigate the failure:"
        echo "  1. Examine the failed patch:"
        echo "     cat ${PATCH_DIR}/*.patch | less"
        echo ""
        echo "  2. Try applying manually to see the conflict:"
        echo "     git am --directory=${subdir} ${PATCH_DIR}/*.patch"
        echo ""
        echo "  3. 
If you want to start over:" + echo " git checkout main" + echo " git branch -D ${branch_name}" + echo " rm -rf ${TEMP_DIR}" + echo "" + exit 1 + fi + + APPLIED_COUNT=$applied +} + +# Print success message and next steps +print_success() { + local repo_name="$1" + local pr_number="$2" + local github_repo="$3" + local branch_name="import/${repo_name}-pr-${pr_number}" + + echo "" + echo -e "${GREEN}========================================${NC}" + echo -e "${GREEN} PR Import Successful!${NC}" + echo -e "${GREEN}========================================${NC}" + echo "" + echo "Summary:" + echo " - Source: ${github_repo}#${pr_number}" + echo " - Title: ${PR_TITLE}" + echo " - Branch: ${branch_name}" + echo " - Commits applied: ${APPLIED_COUNT}" + echo "" + echo "Next steps:" + echo " 1. Review the imported commits:" + echo " git log --oneline main..HEAD" + echo "" + echo " 2. Push the branch and create a PR on the monorepo:" + echo " git push -u origin ${branch_name}" + echo " gh pr create --title \"${PR_TITLE}\" --body \"Imported from ${github_repo}#${pr_number}\"" + echo "" + echo " 3. 
Or if you need to make changes first:" + echo " # Make your changes" + echo " git add -A && git commit --amend" + echo "" +} + +# Main execution +main() { + local repo_name="$1" + local pr_number="$2" + + echo "" + log_info "Starting PR import: ${repo_name} #${pr_number}" + echo "" + + # Validate arguments + validate_args "$@" + + # Set temp directory with descriptive name + TEMP_DIR="${SCRIPT_DIR}/.import-pr-temp-${repo_name}-${pr_number}" + PATCH_DIR="${TEMP_DIR}/patches" + + # Check dependencies + check_dependencies + echo "" + + # Parse repo mapping + local repo_info + repo_info=$(get_repo_info "$repo_name") + local github_repo subdir + read -r github_repo subdir <<< "${repo_info}" + + log_info "GitHub Repo: ${github_repo}" + log_info "Monorepo Subdir: ${subdir}/" + echo "" + + # Fetch PR info + fetch_pr_info "${github_repo}" "${pr_number}" + echo "" + + # Clone and checkout PR + clone_and_checkout_pr "${github_repo}" "${pr_number}" + echo "" + + # Generate patches + generate_patches + echo "" + + # Apply patches to monorepo + apply_patches "${subdir}" "${repo_name}" "${pr_number}" + echo "" + + # Print success message + print_success "${repo_name}" "${pr_number}" "${github_repo}" +} + +main "$@" diff --git a/protocol/LICENSE b/protocol/LICENSE new file mode 100644 index 000000000..9b5e4019d --- /dev/null +++ b/protocol/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/protocol/Makefile b/protocol/Makefile new file mode 100644 index 000000000..4df02346a --- /dev/null +++ b/protocol/Makefile @@ -0,0 +1,9 @@ +BUF_IMAGE=docker.io/bufbuild/buf:latest +BUF=podman run --volume "$(shell pwd):/workspace" --workdir /workspace docker.io/bufbuild/buf:latest + +all: lint + +lint: + $(BUF) lint + +.PHONY: lint diff --git a/protocol/README.md b/protocol/README.md new file mode 100644 index 000000000..6b3773a69 --- /dev/null +++ b/protocol/README.md @@ -0,0 +1,40 @@ +# Jumpstarter Protocol + +The Jumpstarter Protocol defines the gRPC-based communication layer for the [Jumpstarter](https://jumpstarter.dev) Hardware-in-the-Loop (HiL) ecosystem. It enables seamless, secure, and scalable interaction between clients, the Jumpstarter Service, and exporters—whether they are interfacing with physical or virtual hardware, locally or remotely. + +## Overview +Jumpstarter Protocol provides a unified gRPC interface for: + +- **Clients** to control and monitor remote/local hardware +- **Exporters** to expose hardware interfaces over gRPC +- **Jumpstarter Service** to route and manage connections + +Thanks to gRPC’s support for HTTP/2, streaming, and tunneling, the protocol works efficiently across enterprise networks, VPNs, and cloud environments. 
It appears as standard HTTPS traffic, making it compatible with existing security infrastructure. + +## Features +- 🔌 **Unified Interface:** Interact with virtual or physical hardware through a consistent API. +- 🔐 **Secure by Design:** Leverages gRPC over HTTPS for encrypted communication. +- 🌐 **Flexible Topology:** Supports direct or routed connections via the Jumpstarter Router. +- 📡 **Tunneling Support:** Can tunnel Unix sockets, TCP, and UDP connections over gRPC streams. + +## Related Projects + +- [**Jumpstarter Python:**](https://github.com/jumpstarter-dev/jumpstarter) The Python implementation of this protocol for clients and exporters. +- [**Jumpstarter Service:**](https://github.com/jumpstarter-dev/jumpstarter-controller) The Go implementation of this protocol as a Kubernetes controller. + + +## Documentation + +Jumpstarter's documentation is available at +[jumpstarter.dev](https://jumpstarter.dev). + +## Contributing + +Jumpstarter welcomes contributors of all levels of experience and would love to +see you involved in the project. See the [contributing +guide](https://jumpstarter.dev/contributing/) to get started. + +## License + +Jumpstarter is licensed under the Apache 2.0 License ([LICENSE](LICENSE) or +[https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)). diff --git a/protocol/buf.lock b/protocol/buf.lock new file mode 100644 index 000000000..5eefbd0be --- /dev/null +++ b/protocol/buf.lock @@ -0,0 +1,6 @@ +# Generated by buf. DO NOT EDIT. 
+version: v2 +deps: + - name: buf.build/googleapis/googleapis + commit: 546238c53f7340c6a2a6099fb863bc1b + digest: b5:e017bbf31a3f912e2b969c03c3aa711f466cfe104f510865d1a8ede1be490240aabd4cca5865459a0f15222747284395f98afc094b0fd086e8917a5a7bdd9db0 diff --git a/protocol/buf.yaml b/protocol/buf.yaml new file mode 100644 index 000000000..450f315a8 --- /dev/null +++ b/protocol/buf.yaml @@ -0,0 +1,19 @@ +version: v2 +modules: + - path: proto +deps: + - buf.build/googleapis/googleapis +lint: + use: + - STANDARD + except: + - ENUM_ZERO_VALUE_SUFFIX + - FIELD_LOWER_SNAKE_CASE + - RPC_RESPONSE_STANDARD_NAME + - RPC_REQUEST_RESPONSE_UNIQUE + rpc_allow_same_request_response: true + rpc_allow_google_protobuf_empty_requests: true + rpc_allow_google_protobuf_empty_responses: true +breaking: + use: + - FILE diff --git a/protocol/proto/jumpstarter/client/v1/client.proto b/protocol/proto/jumpstarter/client/v1/client.proto new file mode 100644 index 000000000..5cc5e8897 --- /dev/null +++ b/protocol/proto/jumpstarter/client/v1/client.proto @@ -0,0 +1,172 @@ +// Copyright 2024 The Jumpstarter Authors +// (-- api-linter: core::0215::foreign-type-reference=disabled +// (-- api-linter: core::0192::has-comments=disabled +// (-- api-linter: core::0191::java-package=disabled +// (-- api-linter: core::0191::java-outer-classname=disabled +// (-- api-linter: core::0191::java-multiple-files=disabled + +syntax = "proto3"; + +package jumpstarter.client.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "jumpstarter/v1/kubernetes.proto"; +import "jumpstarter/v1/common.proto"; + +service ClientService { + rpc GetExporter(GetExporterRequest) returns (Exporter) { + option (google.api.http) = {get: 
"/v1/{name=namespaces/*/exporters/*}"}; + option (google.api.method_signature) = "name"; + } + rpc ListExporters(ListExportersRequest) returns (ListExportersResponse) { + option (google.api.http) = {get: "/v1/{parent=namespaces/*}/exporters"}; + option (google.api.method_signature) = "parent"; + } + + rpc GetLease(GetLeaseRequest) returns (Lease) { + option (google.api.http) = {get: "/v1/{name=namespaces/*/leases/*}"}; + option (google.api.method_signature) = "name"; + } + rpc ListLeases(ListLeasesRequest) returns (ListLeasesResponse) { + option (google.api.http) = {get: "/v1/{parent=namespaces/*}/leases"}; + option (google.api.method_signature) = "parent"; + } + rpc CreateLease(CreateLeaseRequest) returns (Lease) { + option (google.api.http) = { + post: "/v1/{parent=namespaces/*}/leases" + body: "lease" + }; + option (google.api.method_signature) = "parent,lease,lease_id"; + } + rpc UpdateLease(UpdateLeaseRequest) returns (Lease) { + option (google.api.http) = { + patch: "/v1/{lease.name=namespaces/*/leases/*}" + body: "lease" + }; + option (google.api.method_signature) = "lease,update_mask"; + } + rpc DeleteLease(DeleteLeaseRequest) returns (google.protobuf.Empty) { + option (google.api.http) = {delete: "/v1/{name=namespaces/*/leases/*}"}; + option (google.api.method_signature) = "name"; + } +} + +message Exporter { + option (google.api.resource) = { + type: "jumpstarter.dev/Exporter" + pattern: "namespaces/{namespace}/exporters/{exporter}" + singular: "exporter" + plural: "exporters" + }; + + string name = 1 [(google.api.field_behavior) = IDENTIFIER]; + map labels = 2; + bool online = 3 [(google.api.field_behavior) = OUTPUT_ONLY, deprecated = true]; + jumpstarter.v1.ExporterStatus status = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + string status_message = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +message Lease { + option (google.api.resource) = { + type: "jumpstarter.dev/Lease" + pattern: "namespaces/{namespace}/leases/{lease}" + singular: 
"lease" + plural: "leases" + }; + + string name = 1 [(google.api.field_behavior) = IDENTIFIER]; + + string selector = 2 [ + (google.api.field_behavior) = REQUIRED, + (google.api.field_behavior) = IMMUTABLE + ]; + optional google.protobuf.Duration duration = 3; + google.protobuf.Duration effective_duration = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + optional google.protobuf.Timestamp begin_time = 5; + optional google.protobuf.Timestamp effective_begin_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + optional google.protobuf.Timestamp end_time = 7; + optional google.protobuf.Timestamp effective_end_time = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + optional string client = 9 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = {type: "jumpstarter.dev/Client"} + ]; + optional string exporter = 10 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = {type: "jumpstarter.dev/Exporter"} + ]; + repeated jumpstarter.v1.Condition conditions = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +message GetExporterRequest { + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = {type: "jumpstarter.dev/Exporter"} + ]; +} + +message ListExportersRequest { + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = {child_type: "jumpstarter.dev/Exporter"} + ]; + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; + string filter = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +message ListExportersResponse { + repeated Exporter exporters = 1; + string next_page_token = 2; +} + +message GetLeaseRequest { + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = {type: "jumpstarter.dev/Lease"} + ]; +} + +message ListLeasesRequest { + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + 
(google.api.resource_reference) = {child_type: "jumpstarter.dev/Lease"} + ]; + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; + string filter = 4 [(google.api.field_behavior) = OPTIONAL]; + optional bool only_active = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +message ListLeasesResponse { + repeated Lease leases = 1; + string next_page_token = 2; +} + +message CreateLeaseRequest { + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = {child_type: "jumpstarter.dev/Lease"} + ]; + + string lease_id = 2 [(google.api.field_behavior) = OPTIONAL]; + Lease lease = 3 [(google.api.field_behavior) = REQUIRED]; +} + +message UpdateLeaseRequest { + Lease lease = 1 [(google.api.field_behavior) = REQUIRED]; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +message DeleteLeaseRequest { + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = {type: "jumpstarter.dev/Lease"} + ]; +} diff --git a/protocol/proto/jumpstarter/v1/common.proto b/protocol/proto/jumpstarter/v1/common.proto new file mode 100644 index 000000000..8748522e6 --- /dev/null +++ b/protocol/proto/jumpstarter/v1/common.proto @@ -0,0 +1,28 @@ +// Copyright 2024 The Jumpstarter Authors + +syntax = "proto3"; + +package jumpstarter.v1; + +// Shared types used across multiple Jumpstarter services + +// Exporter status information +enum ExporterStatus { + EXPORTER_STATUS_UNSPECIFIED = 0; // Unspecified exporter status + EXPORTER_STATUS_OFFLINE = 1; // Exporter is offline + EXPORTER_STATUS_AVAILABLE = 2; // Exporter is available to be leased + EXPORTER_STATUS_BEFORE_LEASE_HOOK = 3; // Exporter is executing before lease hook(s) + EXPORTER_STATUS_LEASE_READY = 4; // Exporter is leased and ready to accept commands + EXPORTER_STATUS_AFTER_LEASE_HOOK = 5; // Exporter is executing after lease hook(s) + 
EXPORTER_STATUS_BEFORE_LEASE_HOOK_FAILED = 6; // Exporter before lease hook failed + EXPORTER_STATUS_AFTER_LEASE_HOOK_FAILED = 7; // Exporter after lease hook failed +} + +// Source of log stream messages +enum LogSource { + LOG_SOURCE_UNSPECIFIED = 0; // Unspecified log source + LOG_SOURCE_DRIVER = 1; // Driver/device logs + LOG_SOURCE_BEFORE_LEASE_HOOK = 2; // beforeLease hook execution logs + LOG_SOURCE_AFTER_LEASE_HOOK = 3; // afterLease hook execution logs + LOG_SOURCE_SYSTEM = 4; // System/exporter logs +} diff --git a/protocol/proto/jumpstarter/v1/jumpstarter.proto b/protocol/proto/jumpstarter/v1/jumpstarter.proto new file mode 100644 index 000000000..d72369d06 --- /dev/null +++ b/protocol/proto/jumpstarter/v1/jumpstarter.proto @@ -0,0 +1,226 @@ +// Copyright 2024 The Jumpstarter Authors + +syntax = "proto3"; + +package jumpstarter.v1; + +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "jumpstarter/v1/kubernetes.proto"; +import "jumpstarter/v1/common.proto"; + +// A service where an exporter can connect to make itself available +service ControllerService { + // Exporter registration + rpc Register(RegisterRequest) returns (RegisterResponse); + + // Exporter disconnection + // Disconnecting with bye will invalidate any existing router tokens + // we will eventually have a mechanism to tell the router this token + // has been invalidated + rpc Unregister(UnregisterRequest) returns (UnregisterResponse); + + // Exporter status report + // Allows exporters to report their own status to the controller + rpc ReportStatus(ReportStatusRequest) returns (ReportStatusResponse); + + // Exporter listening + // Returns stream tokens for accepting incoming client connections + rpc Listen(ListenRequest) returns (stream ListenResponse); + + // Exporter status + // Returns lease status for the exporter + rpc Status(StatusRequest) 
returns (stream StatusResponse); + + // Client connecting + // Returns stream token for connecting to the desired exporter + // Leases are checked before token issuance + rpc Dial(DialRequest) returns (DialResponse); + + // Audit events from the exporters + // audit events are used to track the exporter's activity + rpc AuditStream(stream AuditStreamRequest) returns (google.protobuf.Empty); + + // Get Lease + rpc GetLease(GetLeaseRequest) returns (GetLeaseResponse); + // Request Lease + rpc RequestLease(RequestLeaseRequest) returns (RequestLeaseResponse); + // Release Lease + rpc ReleaseLease(ReleaseLeaseRequest) returns (ReleaseLeaseResponse); + // List Leases + rpc ListLeases(ListLeasesRequest) returns (ListLeasesResponse); +} + +message RegisterRequest { + // additional context: + // - token/authentication mechanism + map<string, string> labels = 1; + // standard labels: + // jumpstarter.dev/hostname= + // jumpstarter.dev/name= + repeated DriverInstanceReport reports = 2; +} + +message DriverInstanceReport { + string uuid = 1; // a unique id within the exporter + optional string parent_uuid = 2; // optional, if device has a parent device + map<string, string> labels = 3; + optional string description = 4; // optional custom driver description for CLI + map<string, string> methods_description = 5; // method name -> help text for CLI +} + +message RegisterResponse { + string uuid = 1; +} + +message UnregisterRequest { + string reason = 2; +} + +message UnregisterResponse {} + +message ListenRequest { + string lease_name = 1; +} + +message ListenResponse { + string router_endpoint = 1; + string router_token = 2; +} + +message StatusRequest {} + +message StatusResponse { + bool leased = 1; + optional string lease_name = 2; + optional string client_name = 3; +} + +message DialRequest { + string lease_name = 1; +} + +message DialResponse { + string router_endpoint = 1; + string router_token = 2; +} + +message AuditStreamRequest { + // additional context: + // - token/authentication mechanism + string exporter_uuid 
= 1; + string driver_instance_uuid = 2; + string severity = 3; + string message = 4; +} + +message ReportStatusRequest { + ExporterStatus status = 1; + optional string message = 2; // Optional human-readable status message +} + +message ReportStatusResponse {} + +// A service an exporter can share locally to be used without a server +// Channel/Call credentials are used to authenticate the client, and routing to the right exporter +service ExporterService { + // Exporter report retrieval + rpc GetReport(google.protobuf.Empty) returns (GetReportResponse); + rpc DriverCall(DriverCallRequest) returns (DriverCallResponse); + rpc StreamingDriverCall(StreamingDriverCallRequest) returns (stream StreamingDriverCallResponse); + rpc LogStream(google.protobuf.Empty) returns (stream LogStreamResponse); + rpc Reset(ResetRequest) returns (ResetResponse); + rpc GetStatus(GetStatusRequest) returns (GetStatusResponse); +} + +message GetReportResponse { + string uuid = 1; + map<string, string> labels = 2; + // standard labels: + // jumpstarter.dev/hostname= + // jumpstarter.dev/name= + repeated DriverInstanceReport reports = 3; + repeated Endpoint alternative_endpoints = 4; +} + +message Endpoint { + string endpoint = 1; + string certificate = 2; + string client_certificate = 3; + string client_private_key = 4; +} + +message DriverCallRequest { + string uuid = 1; + string method = 2; + repeated google.protobuf.Value args = 3; +} + +message DriverCallResponse { + string uuid = 1; + google.protobuf.Value result = 2; +} + +message StreamingDriverCallRequest { + string uuid = 1; + string method = 2; + repeated google.protobuf.Value args = 3; +} + +message StreamingDriverCallResponse { + string uuid = 1; + google.protobuf.Value result = 2; +} + +message LogStreamResponse { + string uuid = 1; + string severity = 2; + string message = 3; + optional LogSource source = 4; // origin of this log message (see LogSource); optional +} + +message ResetRequest {} +message ResetResponse {} + +message GetLeaseRequest { + string name = 1; +} + +message 
GetLeaseResponse { + google.protobuf.Duration duration = 1; + LabelSelector selector = 2; + optional google.protobuf.Timestamp begin_time = 3; + optional google.protobuf.Timestamp end_time = 4; + optional string exporter_uuid = 5; + repeated Condition conditions = 6; +} + +message RequestLeaseRequest { + google.protobuf.Duration duration = 1; + LabelSelector selector = 2; +} + +message RequestLeaseResponse { + string name = 1; +} + +message ReleaseLeaseRequest { + string name = 1; +} + +message ReleaseLeaseResponse {} + +message ListLeasesRequest {} + +message ListLeasesResponse { + repeated string names = 1; +} + +message GetStatusRequest {} + +message GetStatusResponse { + ExporterStatus status = 1; + optional string message = 2; +} diff --git a/protocol/proto/jumpstarter/v1/kubernetes.proto b/protocol/proto/jumpstarter/v1/kubernetes.proto new file mode 100644 index 000000000..541410eed --- /dev/null +++ b/protocol/proto/jumpstarter/v1/kubernetes.proto @@ -0,0 +1,32 @@ +// Copyright 2024 The Jumpstarter Authors + +syntax = "proto3"; + +package jumpstarter.v1; + +message LabelSelectorRequirement { + string key = 1; + string operator = 2; + repeated string values = 3; +} + +// Reference: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/label-selector/ +message LabelSelector { + repeated LabelSelectorRequirement match_expressions = 1; + map<string, string> match_labels = 2; +} + +// Reference: https://github.com/kubernetes/kubernetes/blob/v1.31.1/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +message Time { + optional int64 seconds = 1; + optional int32 nanos = 2; +} + +message Condition { + optional string type = 1; + optional string status = 2; + optional int64 observedGeneration = 3; + optional Time lastTransitionTime = 4; + optional string reason = 5; + optional string message = 6; +} diff --git a/protocol/proto/jumpstarter/v1/router.proto b/protocol/proto/jumpstarter/v1/router.proto new file mode 100644 index 
000000000..98458aeeb --- /dev/null +++ b/protocol/proto/jumpstarter/v1/router.proto @@ -0,0 +1,33 @@ +// Copyright 2024 The Jumpstarter Authors + +syntax = "proto3"; + +package jumpstarter.v1; + +// StreamService +// Claims: +// iss: jumpstarter controller +// aud: jumpstarter router +// sub: jumpstarter client/exporter +// stream: stream id +service RouterService { + // Stream connects caller to another caller of the same stream + rpc Stream(stream StreamRequest) returns (stream StreamResponse); +} + +enum FrameType { + FRAME_TYPE_DATA = 0x00; + FRAME_TYPE_RST_STREAM = 0x03; + FRAME_TYPE_PING = 0x06; + FRAME_TYPE_GOAWAY = 0x07; +} + +message StreamRequest { + bytes payload = 1; + FrameType frame_type = 2; +} + +message StreamResponse { + bytes payload = 1; + FrameType frame_type = 2; +} diff --git a/python/.devfile/Containerfile.client b/python/.devfile/Containerfile.client index 6dbba09e2..3eabc5df4 100644 --- a/python/.devfile/Containerfile.client +++ b/python/.devfile/Containerfile.client @@ -6,7 +6,7 @@ RUN dnf install -y make git && \ rm -rf /var/cache/dnf COPY --from=uv /uv /uvx /bin/ ADD . /src -RUN make -C /src build +RUN make -C /src/python build FROM quay.io/devfile/base-developer-image:ubi9-latest @@ -28,7 +28,7 @@ RUN dnf -y install make git python3.12 python3.12 libusbx python3-pyusb python3. USER 10001 -RUN --mount=from=builder,source=/src/dist,target=/dist python3.12 -m pip install /dist/*.whl +RUN --mount=from=builder,source=/src/python/dist,target=/dist python3.12 -m pip install /dist/*.whl RUN python3.12 -m pip install pytest diff --git a/python/.github/dependabot.yml b/python/.github/dependabot.yml deleted file mode 100644 index f33a02cd1..000000000 --- a/python/.github/dependabot.yml +++ /dev/null @@ -1,12 +0,0 @@ -# To get started with Dependabot version updates, you'll need to specify which -# package ecosystems to update and where the package manifests are located. 
-# Please see the documentation for more information: -# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates -# https://containers.dev/guide/dependabot - -version: 2 -updates: - - package-ecosystem: "devcontainers" - directory: "/" - schedule: - interval: weekly diff --git a/python/.github/workflows/build.yaml b/python/.github/workflows/build.yaml deleted file mode 100644 index f8a2520b6..000000000 --- a/python/.github/workflows/build.yaml +++ /dev/null @@ -1,111 +0,0 @@ -name: Build and push container image -on: - workflow_dispatch: - push: - branches: - - main - - release-* - tags: - - v* - merge_group: - -env: - PUSH: ${{ github.repository_owner == 'jumpstarter-dev' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') || startsWith(github.ref, 'refs/heads/release-')) }} - REGISTRY: quay.io - QUAY_ORG: quay.io/jumpstarter-dev - -jobs: - build-and-push-image: - strategy: - matrix: - image: - - jumpstarter-dev/jumpstarter Dockerfile - - jumpstarter-dev/jumpstarter-utils Dockerfile.utils - - jumpstarter-dev/jumpstarter-dev .devfile/Containerfile - - jumpstarter-dev/jumpstarter-devspace .devfile/Containerfile.client - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - attestations: write - id-token: write - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Get image name and container file - run: | - IMAGE="${{ matrix.image }}" - IMAGE_NAME=$(echo $IMAGE | awk '{print $1}') - CONTAINERFILE=$(echo $IMAGE | awk '{print $2}') - echo "IMAGE_NAME=${IMAGE_NAME}" >> $GITHUB_ENV - echo "IMAGE_NAME=${IMAGE_NAME}" - echo "CONTAINERFILE=${CONTAINERFILE}" >> $GITHUB_ENV - echo "CONTAINERFILE=${CONTAINERFILE}" - - - name: Get version - if: ${{ env.PUSH == 'true' }} - run: | - VERSION=$(git describe --tags) - VERSION=${VERSION#v} # remove the leading v prefix for version - echo "VERSION=${VERSION}" >> $GITHUB_ENV - echo 
"VERSION=${VERSION}" - - - name: Set image tags - if: ${{ env.PUSH == 'true' }} - id: set-tags - run: | - TAGS="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.VERSION }}" - - if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then - TAGS="$TAGS,${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest" - fi - - if [[ "${{ github.ref }}" == refs/heads/release-* ]]; then - RELEASE_BRANCH_NAME=$(basename "${{ github.ref }}") - TAGS="$TAGS,${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${RELEASE_BRANCH_NAME}" - fi - - echo "tags=$TAGS" >> $GITHUB_OUTPUT - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Log in to the Container registry - uses: docker/login-action@v3 - if: ${{ env.PUSH == 'true' }} - with: - registry: ${{ env.REGISTRY }} - username: jumpstarter-dev+jumpstarter_ci - password: ${{ secrets.QUAY_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - - - name: Build and push Docker image - id: push - uses: docker/build-push-action@v6 - with: - context: . 
- file: ${{ env.CONTAINERFILE }} - push: ${{ env.PUSH }} - tags: ${{ steps.set-tags.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - platforms: linux/amd64,linux/arm64 - cache-from: type=gha - cache-to: type=gha,mode=max - - - name: Generate artifact attestation - uses: actions/attest-build-provenance@v1 - if: ${{ env.PUSH == 'true' }} - with: - subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - subject-digest: ${{ steps.push.outputs.digest }} - push-to-registry: ${{ env.PUSH }} diff --git a/python/.github/workflows/e2e.yaml b/python/.github/workflows/e2e.yaml deleted file mode 100644 index 0e79a657d..000000000 --- a/python/.github/workflows/e2e.yaml +++ /dev/null @@ -1,24 +0,0 @@ -name: "Run E2E Tests" -on: - workflow_dispatch: - push: - branches: - - main - - release-* - pull_request: - merge_group: - -permissions: - contents: read - -jobs: - e2e: - if: github.repository_owner == 'jumpstarter-dev' - runs-on: ubuntu-latest - timeout-minutes: 60 - continue-on-error: false - steps: - - uses: jumpstarter-dev/jumpstarter-e2e@main - with: - controller-ref: main - jumpstarter-ref: ${{ github.ref }} diff --git a/python/.github/workflows/ruff.yaml b/python/.github/workflows/ruff.yaml deleted file mode 100644 index 9028ec4b2..000000000 --- a/python/.github/workflows/ruff.yaml +++ /dev/null @@ -1,23 +0,0 @@ -name: Lint - -on: - workflow_dispatch: - push: - branches: - - main - - release-* - pull_request: - merge_group: - -permissions: - contents: read - -jobs: - ruff: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Run ruff - uses: astral-sh/ruff-action@84f83ecf9e1e15d26b7984c7ec9cf73d39ffc946 # v3.3.1 - with: - version-file: pyproject.toml diff --git a/python/.github/workflows/typos.yaml b/python/.github/workflows/typos.yaml deleted file mode 100644 index 33a087163..000000000 --- a/python/.github/workflows/typos.yaml +++ /dev/null @@ -1,21 +0,0 @@ -name: Spell Check - -on: - workflow_dispatch: - push: - branches: - - main - - 
release-* - pull_request: - merge_group: - -permissions: - contents: read - -jobs: - typos: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Run typos - uses: crate-ci/typos@0f0ccba9ed1df83948f0c15026e4f5ccfce46109 # v1.32.0 diff --git a/python/Dockerfile b/python/Dockerfile index 5b2ffb481..b67061d10 100644 --- a/python/Dockerfile +++ b/python/Dockerfile @@ -7,17 +7,17 @@ RUN dnf install -y make git && \ COPY --from=uv /uv /uvx /bin/ FROM fedora:42 AS product -RUN dnf install -y python3 ustreamer libusb1 android-tools python3-libgpiod && \ +RUN dnf install -y python3 ustreamer libusb1 android-tools python3-libgpiod sigrok-cli && \ dnf clean all && \ rm -rf /var/cache/dnf COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ FROM builder AS wheels ADD . /src -RUN make -C /src build +RUN make -C /src/python build FROM product -RUN --mount=from=wheels,source=/src/dist,target=/dist \ +RUN --mount=from=wheels,source=/src/python/dist,target=/dist \ uv venv /jumpstarter && \ VIRTUAL_ENV=/jumpstarter uv pip install /dist/*.whl ENV PATH="/jumpstarter/bin:$PATH" diff --git a/python/__templates__/driver/pyproject.toml.tmpl b/python/__templates__/driver/pyproject.toml.tmpl index e7dbb11ee..71d1643c5 100644 --- a/python/__templates__/driver/pyproject.toml.tmpl +++ b/python/__templates__/driver/pyproject.toml.tmpl @@ -15,7 +15,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../'} +raw-options = { 'root' = '../../../'} [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/docs/multiversion.sh b/python/docs/multiversion.sh index 02291d373..10c2f11b5 100755 --- a/python/docs/multiversion.sh +++ b/python/docs/multiversion.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -euox pipefail -declare -a BRANCHES=("main" "release-0.5" "release-0.6" "release-0.7") +declare -a BRANCHES=("main" "release-0.6" "release-0.7") # https://stackoverflow.com/a/246128 SCRIPT_DIR=$( cd -- "$( dirname -- 
"${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) @@ -15,10 +15,10 @@ for BRANCH in "${BRANCHES[@]}"; do git worktree add --force "${WORKTREE}" "${BRANCH}" - uv run --project "${WORKTREE}" --isolated --all-packages --group docs \ - make -C "${WORKTREE}/docs" html SPHINXOPTS="-D version=${BRANCH}" + uv run --project "${WORKTREE}/python" --isolated --all-packages --group docs \ + make -C "${WORKTREE}/python/docs" html SPHINXOPTS="-D version=${BRANCH}" - cp -r "${WORKTREE}/docs/build/html" "${OUTPUT_DIR}/${BRANCH}" + cp -r "${WORKTREE}/python/docs/build/html" "${OUTPUT_DIR}/${BRANCH}" git worktree remove --force "${WORKTREE}" done diff --git a/python/docs/source/reference/package-apis/drivers/index.md b/python/docs/source/reference/package-apis/drivers/index.md index 2a5e173cc..c1db1e0ea 100644 --- a/python/docs/source/reference/package-apis/drivers/index.md +++ b/python/docs/source/reference/package-apis/drivers/index.md @@ -72,6 +72,8 @@ Drivers for debugging and programming devices: * **[QEMU](qemu.md)** (`jumpstarter-driver-qemu`) - QEMU virtualization platform * **[Corellium](corellium.md)** (`jumpstarter-driver-corellium`) - Corellium virtualization platform +* **[Sigrok](sigrok.md)** (`jumpstarter-driver-sigrok`) - Logic analyzer and + oscilloscope support via sigrok-cli * **[U-Boot](uboot.md)** (`jumpstarter-driver-uboot`) - Universal Bootloader interface * **[RideSX](ridesx.md)** (`jumpstarter-driver-ridesx`) - Flashing and power management for Qualcomm RideSX devices @@ -105,6 +107,7 @@ gpiod.md ridesx.md sdwire.md shell.md +sigrok.md ssh.md snmp.md tasmota.md diff --git a/python/docs/source/reference/package-apis/drivers/sigrok.md b/python/docs/source/reference/package-apis/drivers/sigrok.md new file mode 120000 index 000000000..979eeb03f --- /dev/null +++ b/python/docs/source/reference/package-apis/drivers/sigrok.md @@ -0,0 +1 @@ +../../../../../packages/jumpstarter-driver-sigrok/README.md \ No newline at end of file diff --git 
a/python/packages/jumpstarter-all/pyproject.toml b/python/packages/jumpstarter-all/pyproject.toml index a071b28cc..036e4ee46 100644 --- a/python/packages/jumpstarter-all/pyproject.toml +++ b/python/packages/jumpstarter-all/pyproject.toml @@ -52,7 +52,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-cli-admin/pyproject.toml b/python/packages/jumpstarter-cli-admin/pyproject.toml index c8278da56..886d97675 100644 --- a/python/packages/jumpstarter-cli-admin/pyproject.toml +++ b/python/packages/jumpstarter-cli-admin/pyproject.toml @@ -32,7 +32,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-cli-common/pyproject.toml b/python/packages/jumpstarter-cli-common/pyproject.toml index a52e3d0c2..e892545fa 100644 --- a/python/packages/jumpstarter-cli-common/pyproject.toml +++ b/python/packages/jumpstarter-cli-common/pyproject.toml @@ -34,7 +34,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-cli-driver/pyproject.toml b/python/packages/jumpstarter-cli-driver/pyproject.toml index c1856a505..db35f025d 100644 --- a/python/packages/jumpstarter-cli-driver/pyproject.toml +++ b/python/packages/jumpstarter-cli-driver/pyproject.toml @@ -32,7 +32,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-cli/pyproject.toml b/python/packages/jumpstarter-cli/pyproject.toml index 8d8afa4a8..f05a03e02 100644 --- a/python/packages/jumpstarter-cli/pyproject.toml +++ b/python/packages/jumpstarter-cli/pyproject.toml @@ -37,7 +37,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-ble/pyproject.toml b/python/packages/jumpstarter-driver-ble/pyproject.toml index 6f03d3e7f..13d50eebd 100644 --- a/python/packages/jumpstarter-driver-ble/pyproject.toml +++ b/python/packages/jumpstarter-driver-ble/pyproject.toml @@ -14,7 +14,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-can/pyproject.toml b/python/packages/jumpstarter-driver-can/pyproject.toml index 97b4b463a..c00e80380 100644 --- a/python/packages/jumpstarter-driver-can/pyproject.toml +++ b/python/packages/jumpstarter-driver-can/pyproject.toml @@ -26,7 +26,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-composite/pyproject.toml b/python/packages/jumpstarter-driver-composite/pyproject.toml index bd1e25e58..0fe9e9995 100644 --- a/python/packages/jumpstarter-driver-composite/pyproject.toml +++ b/python/packages/jumpstarter-driver-composite/pyproject.toml @@ -24,7 +24,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-corellium/pyproject.toml b/python/packages/jumpstarter-driver-corellium/pyproject.toml index 05810dcc1..e0bd6f69e 100644 --- a/python/packages/jumpstarter-driver-corellium/pyproject.toml +++ b/python/packages/jumpstarter-driver-corellium/pyproject.toml @@ -27,7 +27,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-dutlink/pyproject.toml b/python/packages/jumpstarter-driver-dutlink/pyproject.toml index acc77f4e8..7e81dc4e3 100644 --- a/python/packages/jumpstarter-driver-dutlink/pyproject.toml +++ b/python/packages/jumpstarter-driver-dutlink/pyproject.toml @@ -37,7 +37,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-energenie/pyproject.toml b/python/packages/jumpstarter-driver-energenie/pyproject.toml index 0e5f22e1c..4aa67ab06 100644 --- a/python/packages/jumpstarter-driver-energenie/pyproject.toml +++ b/python/packages/jumpstarter-driver-energenie/pyproject.toml @@ -30,7 +30,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../'} +raw-options = { 'root' = '../../../'} [build-system] requires = ["hatchling", "hatch-vcs"] diff --git a/python/packages/jumpstarter-driver-flashers/pyproject.toml b/python/packages/jumpstarter-driver-flashers/pyproject.toml index 2127a6fae..26db295b4 100644 --- a/python/packages/jumpstarter-driver-flashers/pyproject.toml +++ b/python/packages/jumpstarter-driver-flashers/pyproject.toml @@ -23,7 +23,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-gpiod/pyproject.toml b/python/packages/jumpstarter-driver-gpiod/pyproject.toml index 5fb933d80..ff5ca6edc 100644 --- a/python/packages/jumpstarter-driver-gpiod/pyproject.toml +++ b/python/packages/jumpstarter-driver-gpiod/pyproject.toml @@ -26,7 +26,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-http-power/pyproject.toml b/python/packages/jumpstarter-driver-http-power/pyproject.toml index 5791f9a25..39b5f8cbc 100644 --- a/python/packages/jumpstarter-driver-http-power/pyproject.toml +++ b/python/packages/jumpstarter-driver-http-power/pyproject.toml @@ -17,7 +17,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-http/pyproject.toml b/python/packages/jumpstarter-driver-http/pyproject.toml index 55d6053f6..06cd3553f 100644 --- a/python/packages/jumpstarter-driver-http/pyproject.toml +++ b/python/packages/jumpstarter-driver-http/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-iscsi/pyproject.toml b/python/packages/jumpstarter-driver-iscsi/pyproject.toml index e8ee9cc7e..a591b6a04 100644 --- a/python/packages/jumpstarter-driver-iscsi/pyproject.toml +++ b/python/packages/jumpstarter-driver-iscsi/pyproject.toml @@ -17,7 +17,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-network/pyproject.toml b/python/packages/jumpstarter-driver-network/pyproject.toml index fcf695120..b02595109 100644 --- a/python/packages/jumpstarter-driver-network/pyproject.toml +++ 
b/python/packages/jumpstarter-driver-network/pyproject.toml @@ -46,7 +46,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-opendal/pyproject.toml b/python/packages/jumpstarter-driver-opendal/pyproject.toml index 316e18105..7fe90c9fc 100644 --- a/python/packages/jumpstarter-driver-opendal/pyproject.toml +++ b/python/packages/jumpstarter-driver-opendal/pyproject.toml @@ -23,7 +23,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-power/pyproject.toml b/python/packages/jumpstarter-driver-power/pyproject.toml index 82ae4d2b3..1106dded0 100644 --- a/python/packages/jumpstarter-driver-power/pyproject.toml +++ b/python/packages/jumpstarter-driver-power/pyproject.toml @@ -23,7 +23,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-probe-rs/pyproject.toml b/python/packages/jumpstarter-driver-probe-rs/pyproject.toml index 16243c0bb..56fe83c9b 100644 --- a/python/packages/jumpstarter-driver-probe-rs/pyproject.toml +++ b/python/packages/jumpstarter-driver-probe-rs/pyproject.toml @@ -15,7 +15,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-pyserial/pyproject.toml b/python/packages/jumpstarter-driver-pyserial/pyproject.toml index 24b8db141..20792b73d 100644 --- a/python/packages/jumpstarter-driver-pyserial/pyproject.toml +++ b/python/packages/jumpstarter-driver-pyserial/pyproject.toml @@ -31,7 +31,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-qemu/pyproject.toml b/python/packages/jumpstarter-driver-qemu/pyproject.toml index c44f78ec8..3c77f05e6 100644 --- a/python/packages/jumpstarter-driver-qemu/pyproject.toml +++ b/python/packages/jumpstarter-driver-qemu/pyproject.toml @@ -30,7 +30,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.uv.sources] jumpstarter-driver-opendal = { workspace = true } diff --git a/python/packages/jumpstarter-driver-ridesx/pyproject.toml b/python/packages/jumpstarter-driver-ridesx/pyproject.toml index 567d25c97..29bd45a22 100644 --- a/python/packages/jumpstarter-driver-ridesx/pyproject.toml +++ b/python/packages/jumpstarter-driver-ridesx/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-sdwire/pyproject.toml b/python/packages/jumpstarter-driver-sdwire/pyproject.toml index 9333a68ee..1b50ab474 100644 --- a/python/packages/jumpstarter-driver-sdwire/pyproject.toml +++ b/python/packages/jumpstarter-driver-sdwire/pyproject.toml @@ -25,7 +25,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-shell/pyproject.toml b/python/packages/jumpstarter-driver-shell/pyproject.toml index ca2641272..a866cfc57 100644 --- a/python/packages/jumpstarter-driver-shell/pyproject.toml +++ b/python/packages/jumpstarter-driver-shell/pyproject.toml @@ -27,7 +27,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-sigrok/.gitignore b/python/packages/jumpstarter-driver-sigrok/.gitignore new file mode 100644 index 000000000..cbc5d672b --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/.gitignore @@ -0,0 +1,3 @@ +__pycache__/ +.coverage +coverage.xml diff --git a/python/packages/jumpstarter-driver-sigrok/README.md b/python/packages/jumpstarter-driver-sigrok/README.md new file mode 100644 index 000000000..13344e058 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/README.md @@ -0,0 +1,227 @@ +# Sigrok Driver + +`jumpstarter-driver-sigrok` wraps [sigrok-cli](https://sigrok.org/wiki/Sigrok-cli) to provide logic analyzer and oscilloscope capture from Jumpstarter exporters. It supports: +- **Logic analyzers** (digital channels) +- **Oscilloscopes** (analog channels) - voltage waveform capture +- One-shot and streaming capture +- Multiple output formats with parsing (VCD, CSV, Bits, ASCII) + +## Installation + +```shell +pip3 install --extra-index-url https://pkg.jumpstarter.dev/simple/ jumpstarter-driver-sigrok +``` + +## Configuration (exporter) + +```yaml +export: + sigrok: + type: jumpstarter_driver_sigrok.driver.Sigrok + driver: fx2lafw # sigrok driver (demo, fx2lafw, rigol-ds, etc.) 
+ conn: auto # optional: USB VID.PID, serial path, or "auto" for auto-detect + channels: # optional: map device channels to friendly names + D0: clk + D1: mosi + D2: miso + D3: cs +``` + +### Configuration Parameters + +| Parameter | Description | Type | Required | Default | +|-----------|-------------|------|----------|---------| +| `driver` | Sigrok driver name (e.g., `demo`, `fx2lafw`, `rigol-ds`) | str | yes | - | +| `conn` | Connection string (USB VID.PID, serial path, or `"auto"` for auto-detect) | str \| None | no | "auto" | +| `executable` | Path to `sigrok-cli` executable | str | no | Auto-detected from PATH | +| `channels` | Channel mapping from device names (D0, A0) to semantic names (clk, voltage) | dict[str, str] | no | {} (empty) | + +## CaptureConfig Parameters (client-side) + +| Parameter | Description | Type | Required | Default | +|-----------|-------------|------|----------|---------| +| `sample_rate` | Sampling rate (e.g., `"1M"`, `"8MHz"`, `"24000000"`) | str | no | "1M" | +| `samples` | Number of samples to capture (`None` for continuous) | int \| None | no | None | +| `pretrigger` | Number of samples to capture before trigger | int \| None | no | None | +| `triggers` | Trigger conditions by channel name (e.g., `{"cs": "falling"}`) | dict[str, str] \| None | no | None | +| `channels` | List of channel names to capture (overrides defaults) | list[str] \| None | no | None | +| `output_format` | Output format (vcd, csv, bits, ascii, srzip, binary) | str | no | "vcd" | + +## Client API + +- `scan()` — list devices for the configured driver +- `capture(config)` — one-shot capture, returns `CaptureResult` with base64 data +- `capture_stream(config)` — streaming capture via `--continuous` +- `get_driver_info()` — driver, conn, channel map +- `get_channel_map()` — device-to-semantic name mappings +- `list_output_formats()` — supported formats (csv, srzip, vcd, binary, bits, ascii) + +## Output Formats + +The driver supports multiple output formats. 
**VCD (Value Change Dump) is the default** because: +- ✅ **Efficient**: Only records signal changes (not every sample) +- ✅ **Precise timing**: Includes exact timestamps in nanoseconds +- ✅ **Widely supported**: Standard format for signal analysis tools +- ✅ **Mixed signals**: Handles both digital and analog data + +### Available Formats + +| Format | Use Case | Decoded By | +|--------|----------|------------| +| `vcd` (default) | Change-based signals with timing | `result.decode()` → `list[Sample]` | +| `csv` | All samples with timing | `result.decode()` → `list[Sample]` | +| `bits` | Bit sequences by channel | `result.decode()` → `dict[str, list[int]]` | +| `ascii` | ASCII art visualization | `result.decode()` → `str` | +| `srzip` | Raw sigrok session (for PulseView) | `result.data` (raw bytes) | +| `binary` | Raw binary data | `result.data` (raw bytes) | + +### Output Format Constants + +```python +from jumpstarter_driver_sigrok.common import OutputFormat + +config = CaptureConfig( + sample_rate="1MHz", + samples=1000, + output_format=OutputFormat.VCD, # or CSV, BITS, ASCII, SRZIP, BINARY +) +``` + +## Examples + +### Example 1: Simple Capture (VCD format - default) + +**Python client code:** +```python +from jumpstarter_driver_sigrok.common import CaptureConfig + +# Capture with default VCD format (efficient, change-based with timing) +config = CaptureConfig( + sample_rate="1MHz", + samples=1000, + channels=["D0", "D1", "D2"], # Use device channel names or mapped names +) +result = client.capture(config) + +# Decode VCD to get samples with timing +samples = result.decode() # list[Sample] +for sample in samples[:5]: + print(f"Time: {sample.time}s, Values: {sample.values}") +``` + +**Equivalent sigrok-cli command:** +```bash +sigrok-cli -d fx2lafw -C D0,D1,D2 \ + -c samplerate=1MHz --samples 1000 \ + -O vcd -o /tmp/capture.vcd +``` + +--- + +### Example 2: Triggered Capture with Pretrigger + +**Python client code:** +```python +from 
jumpstarter_driver_sigrok.common import CaptureConfig + +# Capture with trigger and pretrigger buffer (VCD format - default) +config = CaptureConfig( + sample_rate="8MHz", + samples=20000, + pretrigger=5000, # Capture 5000 samples before trigger + triggers={"D0": "rising"}, # Trigger on D0 rising edge + channels=["D0", "D1", "D2", "D3"], + # output_format defaults to VCD (efficient change-based format) +) +result = client.capture(config) + +# Decode to analyze signal changes with precise timing +samples = result.decode() # list[Sample] - only changes recorded +print(f"Captured {len(samples)} signal changes") + +# Access timing and values +for sample in samples[:3]: + print(f"Time: {sample.time}s, Changed: {sample.values}") +``` + +**Equivalent sigrok-cli command:** +```bash +sigrok-cli -d fx2lafw -C D0,D1,D2,D3 \ + -c samplerate=8MHz,samples=20000,pretrigger=5000 \ + --triggers D0=rising \ + -O vcd -o /tmp/capture.vcd +``` + +--- + +### Example 3: Oscilloscope (Analog Channels) + +**Exporter configuration:** +```yaml +export: + oscilloscope: + type: jumpstarter_driver_sigrok.driver.Sigrok + driver: rigol-ds # or demo for testing + conn: usb # or serial path + channels: + A0: CH1 + A1: CH2 +``` + +**Python client code:** +```python +from jumpstarter_driver_sigrok.common import CaptureConfig, OutputFormat + +# Capture analog waveforms +config = CaptureConfig( + sample_rate="1MHz", + samples=10000, + channels=["CH1", "CH2"], # Analog channels + output_format=OutputFormat.CSV, # CSV for voltage values +) +result = client.capture(config) + +# Parse voltage data +samples = result.decode() # list[Sample] +for sample in samples[:5]: + print(f"Time: {sample.time}s") + print(f" CH1: {sample.values.get('A0', 'N/A')}V") + print(f" CH2: {sample.values.get('A1', 'N/A')}V") +``` + +**Equivalent sigrok-cli command:** +```bash +sigrok-cli -d rigol-ds:conn=usb -C A0=CH1,A1=CH2 \ + -c samplerate=1MHz --samples 10000 \ + -O csv -o /tmp/capture.csv +``` + +--- + +### Example 4: Bits 
Format (Simple Bit Sequences) + +**Python client code:** +```python +from jumpstarter_driver_sigrok.common import CaptureConfig, OutputFormat + +# Capture in bits format (useful for visual inspection) +config = CaptureConfig( + sample_rate="100kHz", + samples=100, + channels=["D0", "D1", "D2"], + output_format=OutputFormat.BITS, +) +result = client.capture(config) + +# Get bit sequences per channel +bits_by_channel = result.decode() # dict[str, list[int]] +for channel, bits in bits_by_channel.items(): + print(f"{channel}: {''.join(map(str, bits[:20]))}") # First 20 bits +``` + +**Equivalent sigrok-cli command:** +```bash +sigrok-cli -d demo -C D0,D1,D2 \ + -c samplerate=100kHz --samples 100 \ + -O bits -o /tmp/capture.bits +``` diff --git a/python/packages/jumpstarter-driver-sigrok/examples/exporter.yaml b/python/packages/jumpstarter-driver-sigrok/examples/exporter.yaml new file mode 100644 index 000000000..7e1029ea9 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/examples/exporter.yaml @@ -0,0 +1,22 @@ +apiVersion: jumpstarter.dev/v1alpha1 +kind: ExporterConfig +metadata: + namespace: default + name: demo +endpoint: grpc.jumpstarter.192.168.0.203.nip.io:8082 +token: "" +export: + sigrok: + type: jumpstarter_driver_sigrok.driver.Sigrok + config: + driver: demo + conn: auto + channels: + D0: vcc + D1: cs + D2: miso + D3: mosi + D4: clk + D5: sda + D6: scl + diff --git a/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/__init__.py b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/__init__.py new file mode 100644 index 000000000..7b134cb86 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/__init__.py @@ -0,0 +1,11 @@ +from jumpstarter_driver_sigrok.common import ( + CaptureConfig, + CaptureResult, + DecoderConfig, + OutputFormat, + Sample, +) +from jumpstarter_driver_sigrok.driver import Sigrok + +__all__ = ["Sigrok", "CaptureConfig", "CaptureResult", "DecoderConfig", 
"OutputFormat", "Sample"] + diff --git a/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/client.py b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/client.py new file mode 100644 index 000000000..1e1dc9d20 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/client.py @@ -0,0 +1,37 @@ +from dataclasses import dataclass + +from .common import CaptureConfig, CaptureResult +from jumpstarter.client import DriverClient + + +@dataclass(kw_only=True) +class SigrokClient(DriverClient): + """Client methods for the Sigrok driver.""" + + def scan(self) -> str: + return self.call("scan") + + def capture(self, config: CaptureConfig | dict) -> CaptureResult: + return CaptureResult.model_validate(self.call("capture", config)) + + def capture_stream(self, config: CaptureConfig | dict): + """Stream capture data from sigrok-cli. + + Args: + config: CaptureConfig or dict with capture parameters + + Yields: + bytes: Chunks of captured data + """ + for chunk in self.streamingcall("capture_stream", config): + yield chunk + + def get_driver_info(self) -> dict: + return self.call("get_driver_info") + + def get_channel_map(self) -> dict: + return self.call("get_channel_map") + + def list_output_formats(self) -> list[str]: + return self.call("list_output_formats") + diff --git a/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/common.py b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/common.py new file mode 100644 index 000000000..bffbb97e0 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/common.py @@ -0,0 +1,199 @@ +from __future__ import annotations + +from typing import Any, Iterator + +from pydantic import BaseModel, Field + + +class OutputFormat: + """Constants for sigrok output formats.""" + CSV = "csv" + BITS = "bits" + ASCII = "ascii" + BINARY = "binary" + SRZIP = "srzip" + VCD = "vcd" + + @classmethod + def all(cls) -> 
list[str]: + return [cls.CSV, cls.BITS, cls.ASCII, cls.BINARY, cls.SRZIP, cls.VCD] + + +class Sample(BaseModel): + """A single sample with timing information.""" + sample: int # Sample index + time: float # Time in seconds (full precision) + values: dict[str, int | float] # Channel values (digital: 0/1, analog: voltage) + + def __str__(self) -> str: + """Format sample with clean time display using appropriate unit (fs/ps/ns/μs/ms/s).""" + time_str = self._format_time(self.time) + return f"Sample(sample={self.sample}, time={time_str}, values={self.values})" + + @staticmethod + def _format_time(time_s: float) -> str: + """Format time in seconds to the most appropriate unit. + + Args: + time_s: Time in seconds + + Returns: + Formatted string like "1.5ns", "2.3μs", "1.5ms", "2s" + """ + # Special case for zero + if time_s == 0: + return "0s" + + abs_time = abs(time_s) + + # Define units in descending order (seconds to femtoseconds) + units = [ + (1.0, "s"), + (1e-3, "ms"), + (1e-6, "μs"), + (1e-9, "ns"), + (1e-12, "ps"), + (1e-15, "fs"), + ] + + # Find the most appropriate unit + for scale, unit in units: + if abs_time >= scale or scale == 1e-15: # Use fs as minimum + value = time_s / scale + # Format with up to 6 significant digits, remove trailing zeros + formatted = f"{value:.6g}" + return f"{formatted}{unit}" + + # Fallback (should never reach here) + return f"{time_s:.6g}s" + + +class DecoderConfig(BaseModel): + """Protocol decoder configuration (real-time during capture).""" + + name: str + channels: dict[str, str] | None = None + options: dict[str, Any] | None = None + annotations: list[str] | None = None + stack: list["DecoderConfig"] | None = None + + +class CaptureConfig(BaseModel): + sample_rate: str = Field(default="1M", description="e.g., 8MHz, 1M, 24000000") + samples: int | None = Field(default=None, description="number of samples; None for continuous") + pretrigger: int | None = Field(default=None, description="samples before trigger") + triggers: 
dict[str, str] | None = Field(default=None, description="e.g., {'D0': 'rising'}") + channels: list[str] | None = Field(default=None, description="override default channels by name") + output_format: str = Field( + default=OutputFormat.VCD, + description="Output format (default: vcd - efficient change-based format with timing). " + "Options: vcd, csv, srzip, binary, bits, ascii", + ) + decoders: list[DecoderConfig] | None = Field(default=None, description="real-time protocol decoding") + + +class CaptureResult(BaseModel): + """Result from a capture operation. + + Note: data is base64-encoded for reliable JSON transport. Client methods + automatically decode it to bytes for you. + """ + data_b64: str # Base64-encoded binary data + output_format: str + sample_rate: str + channel_map: dict[str, str] + triggers: dict[str, str] | None = None + decoders: list[DecoderConfig] | None = None + + def __str__(self) -> str: + """Format CaptureResult with truncated data_b64 field.""" + data_len = len(self.data_b64) + if data_len <= 50: + data_preview = self.data_b64 + else: + # Show first 50 and last 50 chars with ellipsis + data_preview = f"{self.data_b64[:25]}...{self.data_b64[-25:]} ({data_len} chars)" + + return ( + f"CaptureResult(output_format='{self.output_format}', " + f"sample_rate='{self.sample_rate}', " + f"data_size={len(self.data)} bytes, " + f"channels={len(self.channel_map)}, " + f"data_b64='{data_preview}')" + ) + + @property + def data(self) -> bytes: + """Get the captured data as bytes (auto-decodes from base64).""" + from base64 import b64decode + return b64decode(self.data_b64) + + def decode(self) -> Iterator[Sample] | dict[str, list[int]] | str: + """Parse captured data based on output format. 
+ + Returns: + - CSV format: Iterator[Sample] yielding samples with timing and all values per sample + - VCD format: Iterator[Sample] yielding samples with timing and only changed values + - Bits format: dict[str, list[int]] with channel→bit sequences + - ASCII format: str with ASCII art visualization + - Other formats: raises NotImplementedError (use .data for raw bytes) + + Note: + Channel names in the output depend on how the data was captured: + - If captured with channel mapping, sigrok-cli outputs mapped names (vcc, cs, etc.) + - If captured without mapping, outputs device names (D0, D1, etc.) + + Raises: + NotImplementedError: For binary/srzip formats (use .data property) + """ + if self.output_format == OutputFormat.CSV: + from .csv import parse_csv + samples_data = parse_csv(self.data, self.sample_rate) + return (Sample.model_validate(s) for s in samples_data) + elif self.output_format == OutputFormat.VCD: + from .vcd import parse_vcd + samples_data = parse_vcd(self.data, self.sample_rate) + return (Sample.model_validate(s) for s in samples_data) + elif self.output_format == OutputFormat.BITS: + return self._parse_bits() + elif self.output_format == OutputFormat.ASCII: + return self.data.decode("utf-8") + else: + raise NotImplementedError( + f"Parsing not implemented for {self.output_format} format. " + f"Use .data property to get raw bytes." + ) + + def _parse_bits(self) -> dict[str, list[int]]: + """Parse bits format to dict of channel→bit sequences. + + Sigrok-cli bits format: "D0:10001\\nD1:01110\\n..." + Each line has format "channel_name:bits" + + Note: For large sample counts, sigrok-cli wraps bits across multiple + lines with repeated channel names. We accumulate all occurrences. 
+ """ + text = self.data.decode("utf-8") + lines = [line.strip() for line in text.strip().split("\n") if line.strip()] + + result: dict[str, list[int]] = {} + + for line in lines: + # Bits format: "D0:10001" or "A0:10001" + if ":" in line: + channel_device_name, bits_str = line.split(":", 1) + channel_device_name = channel_device_name.strip() + + # Map device name (D0) to user-friendly name (vcc) if available + channel_name = self.channel_map.get(channel_device_name, channel_device_name) + + # Parse bits from this line + bits = [int(b) for b in bits_str if b in "01"] + + # Accumulate bits for this channel (may appear on multiple lines) + if channel_name not in result: + result[channel_name] = [] + result[channel_name].extend(bits) + + return result + diff --git a/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/csv.py b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/csv.py new file mode 100644 index 000000000..a0cbdde8a --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/csv.py @@ -0,0 +1,139 @@ +"""CSV format parser for sigrok captures.""" + +from __future__ import annotations + +import csv +from typing import Iterator + + +def parse_csv(data: bytes, sample_rate: str) -> Iterator[dict]: + """Parse CSV format to iterator of samples with timing. 
+ + Args: + data: Raw CSV data as bytes + sample_rate: Sample rate string (e.g., "100kHz", "1MHz") + + Yields: + Dicts with keys: sample, time (seconds), values + """ + text = data.decode("utf-8") + lines = text.strip().split("\n") + + # Parse sample rate for timing calculation + sample_rate_hz = _parse_sample_rate_hz(sample_rate) + time_step_s = 1.0 / sample_rate_hz # seconds per sample + + # Skip comment lines and analog preview lines (format: "A0: -10.0000 V DC") + # The actual data starts after a header row with types like "logic,logic,V DC,V DC" + data_lines = _extract_csv_data_lines(lines) + + if not data_lines or len(data_lines) < 2: + return + + # Parse the CSV data + reader = csv.reader(data_lines) + + # First row is types (logic, V DC, etc.) - use for channel name inference + types_row = next(reader) + + # Get channel names from types + channel_names = _infer_channel_names(types_row) + + # Parse and yield data rows one by one + for idx, row in enumerate(reader): + values = _parse_csv_row(channel_names, row) + yield { + "sample": idx, + "time": idx * time_step_s, + "values": values, + } + + +def _parse_sample_rate_hz(sample_rate: str) -> float: + """Parse sample rate string to Hz.""" + rate = sample_rate.strip().upper() + multipliers = {"K": 1e3, "M": 1e6, "G": 1e9} + + for suffix, mult in multipliers.items(): + if rate.endswith(f"{suffix}HZ"): + return float(rate[:-3]) * mult + elif rate.endswith(suffix): + return float(rate[:-1]) * mult + + # Assume Hz if no suffix + return float(rate.rstrip("HZ")) + + +def _extract_csv_data_lines(lines: list[str]) -> list[str]: + """Extract actual CSV data lines, skipping comments and analog preview lines.""" + data_lines = [] + + for _i, line in enumerate(lines): + line = line.strip() + # Skip comment lines + if line.startswith(";"): + continue + # Skip analog preview lines (contain colon, not CSV comma-separated) + if ":" in line and "," not in line: + continue + # This is CSV data + data_lines.append(line) + + return 
data_lines + + +def _infer_channel_names(types_row: list[str]) -> list[str]: + """Infer channel names from CSV type header row. + + Args: + types_row: List of type strings like ["logic", "logic", "V DC", "V DC"] + + Returns: + List of channel names like ["D0", "D1", "A0", "A1"] + """ + channel_names = [] + digital_count = 0 + analog_count = 0 + + for type_str in types_row: + type_lower = type_str.lower() + if "logic" in type_lower: + channel_names.append(f"D{digital_count}") + digital_count += 1 + elif "v" in type_lower or "dc" in type_lower: + # Analog channel + channel_names.append(f"A{analog_count}") + analog_count += 1 + else: + # Unknown type, use generic name + channel_names.append(f"CH{len(channel_names)}") + + return channel_names + + +def _parse_csv_row(channel_names: list[str], row: list[str]) -> dict[str, int | float]: + """Parse a CSV data row into channel values. + + Args: + channel_names: List of channel names + row: List of value strings + + Returns: + Dict mapping channel name to parsed value + """ + values = {} + + for channel, value in zip(channel_names, row, strict=True): + value = value.strip() + # Try to parse as number (analog) or binary (digital) + try: + if "." 
in value or "e" in value.lower(): + values[channel] = float(value) + else: + values[channel] = int(value) + except ValueError: + # Keep as string if not a number + values[channel] = value + + return values + diff --git a/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/csv_test.py b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/csv_test.py new file mode 100644 index 000000000..e15e545a0 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/csv_test.py @@ -0,0 +1,133 @@ +"""Tests for CSV format parser.""" + +from shutil import which + +import pytest + +from .client import SigrokClient +from .common import CaptureConfig, CaptureResult, OutputFormat +from .driver import Sigrok +from jumpstarter.common.utils import serve + + +@pytest.fixture +def demo_driver_instance(): + """Create a Sigrok driver instance configured for the demo device.""" + # Demo driver has 8 digital channels (D0-D7) and 5 analog (A0-A4) + # Map device channels to decoder-friendly semantic names + return Sigrok( + driver="demo", + executable="sigrok-cli", + channels={ + "D0": "vcc", + "D1": "cs", + "D2": "miso", + "D3": "mosi", + "D4": "clk", + "D5": "sda", + "D6": "scl", + "D7": "gnd", + }, + ) + + +@pytest.fixture +def demo_client(demo_driver_instance): + """Create a client for the demo Sigrok driver.""" + with serve(demo_driver_instance) as client: + yield client + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_csv_format_basic(demo_client: SigrokClient): + """Test CSV format capture with demo driver.""" + cfg = CaptureConfig( + sample_rate="50kHz", + samples=50, + output_format=OutputFormat.CSV, + channels=["vcc", "cs"], # Select specific digital channels + ) + + result = demo_client.capture(cfg) + assert isinstance(result, CaptureResult) + assert isinstance(result.data, bytes) + decoded_data = list(result.decode()) + assert isinstance(decoded_data, list) + assert 
len(decoded_data) > 0 + # CSV format uses inferred names (D0, D1, etc.) based on column types + # Channel mapping is only preserved in VCD format + first_sample = decoded_data[0] + assert "D0" in first_sample.values or "D1" in first_sample.values + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_csv_format_timing(demo_client: SigrokClient): + """Test CSV format timing calculations with integer nanoseconds.""" + cfg = CaptureConfig( + sample_rate="100kHz", + samples=50, + output_format=OutputFormat.CSV, + channels=["D0", "D1", "D2"], # Select specific channels + ) + + result = demo_client.capture(cfg) + assert isinstance(result, CaptureResult) + + # Decode the CSV data + samples = list(result.decode()) + assert isinstance(samples, list) + assert len(samples) > 0 + + # Verify timing progresses correctly + for sample in samples: + assert isinstance(sample.time, float) + # Verify timing progresses (1/100kHz = 0.00001s per sample) + assert sample.time == sample.sample * 0.00001 + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_csv_format_analog_channels(demo_client: SigrokClient): + """Test CSV capture of analog channels with voltage values.""" + cfg = CaptureConfig( + sample_rate="100kHz", + samples=20, + output_format=OutputFormat.CSV, + channels=["A0", "A1"], # Select specific analog channels + ) + + result = demo_client.capture(cfg) + assert isinstance(result, CaptureResult) + assert isinstance(result.data, bytes) + decoded_data = list(result.decode()) + assert isinstance(decoded_data, list) + assert len(decoded_data) > 0 + + # Check first sample for analog values + first_sample = decoded_data[0] + assert len(first_sample.values) > 0 + + # Analog values should be floats (voltages) + for _channel, value in first_sample.values.items(): + assert isinstance(value, (int, float)) + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def 
test_csv_format_mixed_channels(demo_client: SigrokClient): + """Test CSV with both digital and analog channels.""" + cfg = CaptureConfig( + sample_rate="100kHz", + samples=30, + output_format=OutputFormat.CSV, + channels=["D0", "D1", "A0"], # Mix of digital and analog + ) + + result = demo_client.capture(cfg) + samples = list(result.decode()) + + assert isinstance(samples, list) + assert len(samples) > 0 + + # Verify we have values for channels + first_sample = samples[0] + assert len(first_sample.values) > 0 + diff --git a/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/driver.py b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/driver.py new file mode 100644 index 000000000..35c7b88d0 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/driver.py @@ -0,0 +1,286 @@ +from __future__ import annotations + +import asyncio +import subprocess +from base64 import b64encode +from dataclasses import dataclass, field +from pathlib import Path +from shutil import which +from tempfile import TemporaryDirectory + +from .common import CaptureConfig, DecoderConfig, OutputFormat +from jumpstarter.driver import Driver, export + + +def find_sigrok_cli() -> str | None: + """Find sigrok-cli executable in PATH. + + Returns: + Path to executable or None if not found + """ + return which("sigrok-cli") + + +@dataclass(kw_only=True) +class Sigrok(Driver): + """Sigrok driver wrapping sigrok-cli for logic analyzer and oscilloscope support.""" + + driver: str = "demo" + conn: str | None = "auto" + executable: str | None = field(default_factory=find_sigrok_cli) + channels: dict[str, str] = field(default_factory=dict) + + def __post_init__(self): + if hasattr(super(), "__post_init__"): + super().__post_init__() + + def _ensure_executable(self): + """Ensure sigrok-cli is available.""" + if self.executable is None: + raise FileNotFoundError( + "sigrok-cli executable not found in PATH. 
" + "Please install sigrok-cli to use this driver." + ) + + @classmethod + def client(cls) -> str: + return "jumpstarter_driver_sigrok.client.SigrokClient" + + # --- Public API ----------------------------------------------------- + + @export + def scan(self) -> str: + """List devices for the configured driver.""" + self._ensure_executable() + assert self.executable is not None + cmd = [self.executable, "--driver", self.driver, "--scan"] + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + return result.stdout + + @export + def get_driver_info(self) -> dict: + return { + "driver": self.driver, + "conn": self.conn, + "channels": self.channels, + } + + @export + def get_channel_map(self) -> dict[str, str]: + return self.channels + + @export + def list_output_formats(self) -> list[str]: + return OutputFormat.all() + + @export + def capture(self, config: CaptureConfig | dict) -> dict: + """One-shot capture; returns dict with base64-encoded binary data.""" + self._ensure_executable() + cfg = CaptureConfig.model_validate(config) + cmd, outfile, tmpdir = self._build_capture_command(cfg) + + try: + self.logger.debug("Running sigrok-cli: %s", " ".join(cmd)) + subprocess.run(cmd, check=True) + + data = outfile.read_bytes() + # Return as dict with base64-encoded data (reliable for JSON transport) + return { + "data_b64": b64encode(data).decode("ascii"), + "output_format": cfg.output_format, + "sample_rate": cfg.sample_rate, + "channel_map": self.channels, + "triggers": cfg.triggers, + "decoders": [d.model_dump() for d in cfg.decoders] if cfg.decoders else None, + } + finally: + tmpdir.cleanup() + + @export + async def capture_stream(self, config: CaptureConfig | dict): + """Streaming capture; yields chunks of binary data from sigrok-cli stdout.""" + self._ensure_executable() + cfg = CaptureConfig.model_validate(config) + cmd = self._build_stream_command(cfg) + + self.logger.debug("streaming sigrok-cli: %s", " ".join(cmd)) + process = await 
asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + + try: + if process.stdout is None: + raise RuntimeError("sigrok-cli stdout not available") + + # Stream data in chunks + while True: + chunk = await process.stdout.read(4096) + if not chunk: + break + yield chunk + finally: + process.terminate() + try: + await asyncio.wait_for(process.wait(), timeout=5) + except asyncio.TimeoutError: + process.kill() + + # --- Command builders ----------------------------------------------- + + def _build_capture_command(self, cfg: CaptureConfig) -> tuple[list[str], Path, TemporaryDirectory]: + tmpdir = TemporaryDirectory() + outfile = Path(tmpdir.name) / f"capture.{cfg.output_format}" + + cmd: list[str] = self._base_driver_args() + cmd += self._channel_args(cfg.channels) + cmd += self._config_args(cfg) + cmd += self._trigger_args(cfg) + cmd += self._decoder_args(cfg) + cmd += ["-O", cfg.output_format, "-o", str(outfile)] + + return cmd, outfile, tmpdir + + def _build_stream_command(self, cfg: CaptureConfig) -> list[str]: + cmd: list[str] = self._base_driver_args() + cmd += self._channel_args(cfg.channels) + cmd += self._config_args(cfg, continuous=True) + cmd += self._trigger_args(cfg) + cmd += self._decoder_args(cfg) + cmd += ["-O", cfg.output_format, "-o", "-"] + return cmd + + def _base_driver_args(self) -> list[str]: + assert self.executable is not None + if self.conn and self.conn != "auto": + return [self.executable, "-d", f"{self.driver}:conn={self.conn}"] + return [self.executable, "-d", self.driver] + + def _channel_args(self, selected_names: list[str] | None) -> list[str]: + """Build channel selection/renaming args for sigrok-cli. 
+ + Args: + selected_names: Optional list of semantic names to include + + Returns: + List of args like ["-C", "D0=vcc,D1=cs,D2=miso"] + """ + if not self.channels: + return [] + + # Filter channels if specific names requested + if selected_names: + selected_lower = {name.lower() for name in selected_names} + filtered = {dev: user for dev, user in self.channels.items() if user.lower() in selected_lower} + else: + filtered = self.channels + + # Build channel map: device_name=user_name + channel_map = ",".join(f"{dev}={user}" for dev, user in filtered.items()) + return ["-C", channel_map] if channel_map else [] + + def _config_args(self, cfg: CaptureConfig, *, continuous: bool = False) -> list[str]: + parts = [f"samplerate={cfg.sample_rate}"] + if cfg.pretrigger is not None: + parts.append(f"pretrigger={cfg.pretrigger}") + + args: list[str] = [] + if parts: + args += ["-c", ",".join(parts)] + + # sigrok-cli requires one of: --samples, --frames, --time, or --continuous + # If samples is explicitly specified, use that even for streaming + if cfg.samples is not None: + args.extend(["--samples", str(cfg.samples)]) + elif continuous: + args.append("--continuous") + else: + # Default to 1000 samples if not specified + args.extend(["--samples", "1000"]) + + return args + + def _trigger_args(self, cfg: CaptureConfig) -> list[str]: + if not cfg.triggers: + return [] + trigger_parts = [] + for channel, condition in cfg.triggers.items(): + resolved = self._resolve_channel(channel) + trigger_parts.append(f"{resolved}={condition}") + return ["--triggers", ",".join(trigger_parts)] + + def _decoder_args(self, cfg: CaptureConfig) -> list[str]: + if not cfg.decoders: + return [] + + args: list[str] = [] + for decoder in self._flatten_decoders(cfg.decoders): + pin_map = self._resolve_decoder_channels(decoder) + segments = [decoder.name] + + for pin_name, channel_name in pin_map.items(): + segments.append(f"{pin_name}={self._resolve_channel(channel_name)}") + + if decoder.options: + 
for key, value in decoder.options.items(): + segments.append(f"{key}={value}") + + args += ["-P", ":".join(segments)] + + if decoder.annotations: + args += ["-A", f"{decoder.name}=" + ",".join(decoder.annotations)] + + return args + + def _flatten_decoders(self, decoders: list[DecoderConfig]) -> list[DecoderConfig]: + flat: list[DecoderConfig] = [] + for decoder in decoders: + flat.append(decoder) + if decoder.stack: + flat.extend(self._flatten_decoders(decoder.stack)) + return flat + + def _resolve_decoder_channels(self, decoder: DecoderConfig) -> dict[str, str]: + if decoder.channels: + return decoder.channels + + # Best-effort auto-mapping based on common decoder pin names + defaults = { + "spi": ["clk", "mosi", "miso", "cs"], + "i2c": ["scl", "sda"], + "uart": ["rx", "tx"], + } + pins = defaults.get(decoder.name.lower()) + if not pins: + return {} + + resolved: dict[str, str] = {} + available_lower = {name.lower(): name for name in self.channels.values()} + for pin in pins: + if pin in available_lower: + resolved[pin] = available_lower[pin] + return resolved + + def _resolve_channel(self, name_or_dn: str) -> str: + """Resolve a user-friendly channel name to device channel name. 
+ + Args: + name_or_dn: User-friendly name (e.g., "clk", "mosi") or device name (e.g., "D0") + + Returns: + Device channel name (e.g., "D0", "D1") + """ + candidate = name_or_dn.strip() + + # If already a device channel name, return as-is + if candidate in self.channels: + return candidate + + # Search for user-friendly name in channel values + for dev_name, user_name in self.channels.items(): + if user_name.lower() == candidate.lower(): + return dev_name + + raise ValueError(f"Channel '{name_or_dn}' not found in channel map {self.channels}") diff --git a/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/driver_test.py b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/driver_test.py new file mode 100644 index 000000000..087490e61 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/driver_test.py @@ -0,0 +1,495 @@ +from shutil import which + +import pytest + +from .common import CaptureConfig, CaptureResult, OutputFormat +from .driver import Sigrok +from jumpstarter.common.utils import serve + +# Skip all integration tests if sigrok-cli is not available +pytestmark = pytest.mark.skipif( + which("sigrok-cli") is None, + reason="sigrok-cli not found in PATH" +) + + +@pytest.fixture +def demo_driver_instance(): + """Create a Sigrok driver instance configured for the demo device.""" + # Demo driver has 8 digital channels (D0-D7) and 5 analog (A0-A4) + # Map device channels to decoder-friendly semantic names + return Sigrok( + driver="demo", + channels={ + "D0": "vcc", + "D1": "cs", + "D2": "miso", + "D3": "mosi", + "D4": "clk", + "D5": "sda", + "D6": "scl", + "D7": "gnd", + }, + ) + + +@pytest.fixture +def demo_client(demo_driver_instance): + """Create a client connected to demo driver via serve().""" + with serve(demo_driver_instance) as client: + yield client + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_scan_demo_driver(demo_client): + 
"""Test scanning for demo driver via client.""" + result = demo_client.scan() + assert "demo" in result.lower() or "Demo device" in result + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_capture_with_demo_driver(demo_client): + """Test one-shot capture with demo driver via client. + + This test verifies client-server serialization through serve() pattern. + """ + cfg = CaptureConfig( + sample_rate="100kHz", + samples=100, + output_format="srzip", + ) + + result = demo_client.capture(cfg) + + # Verify we got a proper CaptureResult Pydantic model, not just a dict + assert isinstance(result, CaptureResult), f"Expected CaptureResult, got {type(result)}" + + # Verify model attributes work correctly - data should be bytes, not base64 string! + assert result.data + assert isinstance(result.data, bytes), f"Expected bytes, got {type(result.data)}" + assert len(result.data) > 0 + assert result.output_format == "srzip" + assert result.sample_rate == "100kHz" + assert isinstance(result.channel_map, dict) + assert len(result.channel_map) > 0 + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_capture_default_format(demo_client): + """Test capture with default output format (VCD). 
+ + VCD is the default because it's the most efficient format: + - Only records changes (not every sample) + - Includes precise timing information + - Widely supported by signal analysis tools + """ + # Don't specify output_format - should default to VCD + cfg = CaptureConfig( + sample_rate="100kHz", + samples=50, + channels=["D0", "D1", "D2"], + ) + + result = demo_client.capture(cfg) + + # Verify we got VCD format by default + assert isinstance(result, CaptureResult) + assert result.output_format == OutputFormat.VCD + assert isinstance(result.data, bytes) + assert len(result.data) > 0 + + # Verify VCD data can be decoded + samples = list(result.decode()) + assert isinstance(samples, list) + assert len(samples) > 0 + + # Verify samples have timing information (VCD feature) + for sample in samples: + assert hasattr(sample, "time") + assert isinstance(sample.time, float) + assert hasattr(sample, "values") + assert isinstance(sample.values, dict) + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_capture_csv_format(demo_client): + """Test capture with CSV output format via client.""" + cfg = CaptureConfig( + sample_rate="50kHz", + samples=50, + output_format="csv", + ) + + result = demo_client.capture(cfg) + + # Verify CaptureResult model + assert isinstance(result, CaptureResult) + assert isinstance(result.data, bytes) + + # Decode bytes to string for CSV parsing + csv_text = result.data.decode("utf-8") + + # CSV should have headers and data + assert "vcc" in csv_text or "cs" in csv_text or "clk" in csv_text + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_capture_analog_channels(): + """Test capturing analog data from oscilloscope/demo driver. + + Verifies that the API works for analog channels (oscilloscopes) + as well as digital channels (logic analyzers). 
+ """ + # Create driver with analog channel mappings + analog_driver = Sigrok( + driver="demo", + channels={ + "A0": "voltage_in", + "A1": "sine_wave", + "A2": "square_wave", + }, + ) + + with serve(analog_driver) as client: + cfg = CaptureConfig( + sample_rate="100kHz", + samples=20, + channels=["voltage_in", "sine_wave"], # Select specific analog channels + output_format="csv", + ) + + result = client.capture(cfg) + + # Verify we got analog data + assert isinstance(result, CaptureResult) + assert isinstance(result.data, bytes) + + # Parse CSV to check for analog voltage values + csv_text = result.data.decode("utf-8") + + # Should contain voltage values with units (V, mV) + assert "V" in csv_text or "mV" in csv_text + # Should contain our channel names or original analog channel names + assert "voltage_in" in csv_text or "sine_wave" in csv_text or "A0" in csv_text or "A1" in csv_text + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_capture_with_dict_config(demo_client): + """Test capture with dict config (not CaptureConfig object). + + Verifies that dict configs are properly validated and serialized. + """ + # Pass config as dict instead of CaptureConfig object + cfg_dict = { + "sample_rate": "100kHz", + "samples": 100, + "output_format": "srzip", + } + + result = demo_client.capture(cfg_dict) + + # Verify we still get a proper CaptureResult model + assert isinstance(result, CaptureResult) + assert result.data + assert isinstance(result.data, bytes) + assert len(result.data) > 0 + assert result.output_format == "srzip" + + +@pytest.mark.skip(reason="sigrok-cli demo driver doesn't support streaming to stdout (-o -)") +def test_capture_stream_with_demo(demo_client): + """Test streaming capture with demo driver via client. + + Note: sigrok-cli has limitations with streaming output to stdout. + The demo driver and most output formats don't produce data when using `-o -`. 
+ This feature works better with real hardware and certain output formats. + """ + cfg = CaptureConfig( + sample_rate="100kHz", + samples=1000, + output_format="binary", + ) + + received_bytes = 0 + chunk_count = 0 + + # Collect all chunks + for chunk in demo_client.capture_stream(cfg): + received_bytes += len(chunk) + chunk_count += 1 + + # Should have received some data + assert received_bytes > 0 + assert chunk_count > 0 + + +def test_get_driver_info(demo_client): + """Test getting driver information via client. + + Verifies dict serialization through client-server boundary. + """ + info = demo_client.get_driver_info() + + # Verify it's a dict (not a custom object) + assert isinstance(info, dict) + assert info["driver"] == "demo" + assert "channels" in info + assert isinstance(info["channels"], dict) + + +def test_get_channel_map(demo_client): + """Test getting channel mappings via client. + + Verifies dict serialization through client-server boundary. + """ + channels = demo_client.get_channel_map() + + # Verify it's a dict with proper string keys/values + assert isinstance(channels, dict) + assert all(isinstance(k, str) and isinstance(v, str) for k, v in channels.items()) + assert channels["D0"] == "vcc" + assert channels["D4"] == "clk" + assert channels["D7"] == "gnd" + + +def test_list_output_formats(demo_client): + """Test listing supported output formats via client. + + Verifies list serialization through client-server boundary. + """ + formats = demo_client.list_output_formats() + + # Verify it's a proper list of strings + assert isinstance(formats, list) + assert all(isinstance(f, str) for f in formats) + assert "csv" in formats + assert "srzip" in formats + assert "vcd" in formats + assert "binary" in formats + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_decode_csv_format(demo_client): + """Test decoding CSV format to Sample objects with timing. 
+ + Verifies: + - CSV parsing works through client-server boundary + - Sample objects have timing information + - Values are properly typed (int/float) + """ + from .common import OutputFormat, Sample + + cfg = CaptureConfig( + sample_rate="100kHz", + samples=50, + output_format=OutputFormat.CSV, + channels=["D0", "D1", "D2"], # Select specific channels + ) + + result = demo_client.capture(cfg) + assert isinstance(result, CaptureResult) + + # Decode the CSV data + samples = list(result.decode()) + assert isinstance(samples, list) + assert len(samples) > 0 + + # Verify all samples are Sample objects + for sample in samples: + assert isinstance(sample, Sample) + assert isinstance(sample.sample, int) + assert isinstance(sample.time, float) + assert isinstance(sample.values, dict) + + # Verify timing progresses (1/100kHz = 0.00001s per sample) + assert sample.time == sample.sample * 0.00001 + + # Verify values are present + assert len(sample.values) > 0 + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_decode_ascii_format(demo_client): + """Test decoding ASCII format returns string visualization. + + Verifies: + - ASCII format decoding works + - Returns string (not bytes) + """ + from .common import OutputFormat + + cfg = CaptureConfig( + sample_rate="50kHz", + samples=20, + output_format=OutputFormat.ASCII, + channels=["D0", "D1"], + ) + + result = demo_client.capture(cfg) + decoded = result.decode() + + # ASCII format should return string + assert isinstance(decoded, str) + assert len(decoded) > 0 + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_decode_bits_format(demo_client): + """Test decoding bits format to channel→bit sequences. 
+ + Verifies: + - Bits format decoding works + - Returns dict with bit sequences + - Channel names are mapped from device names (D0) to user-friendly names (vcc) + """ + from .common import OutputFormat + + cfg = CaptureConfig( + sample_rate="100kHz", + samples=30, + output_format=OutputFormat.BITS, + channels=["D0", "D1", "D2"], + ) + + result = demo_client.capture(cfg) + decoded = result.decode() + + # Bits format should return dict + assert isinstance(decoded, dict) + assert len(decoded) > 0 + + # Should have user-friendly channel names (vcc, cs, miso) from channel_map + # Not generic names like CH0, CH1 + assert "vcc" in decoded or "D0" in decoded + assert "cs" in decoded or "D1" in decoded + assert "miso" in decoded or "D2" in decoded + + # Each channel should have a list of bits + for channel, bits in decoded.items(): + assert isinstance(channel, str) + assert isinstance(bits, list) + assert all(b in [0, 1] for b in bits) + # Should have bits (at least some, exact count may vary with demo driver timing) + assert len(bits) > 0 + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_decode_vcd_format(demo_client): + """Test decoding VCD format to Sample objects with timing (changes only). 
+ + Verifies: + - VCD parsing works through client-server boundary + - Sample objects have timing information in nanoseconds + - Only changes are recorded (efficient representation) + """ + from .common import OutputFormat, Sample + + cfg = CaptureConfig( + sample_rate="100kHz", + samples=50, + output_format=OutputFormat.VCD, + channels=["D0", "D1", "D2"], # Select specific channels + ) + + result = demo_client.capture(cfg) + assert isinstance(result, CaptureResult) + + # Decode the VCD data + samples = list(result.decode()) + assert isinstance(samples, list) + assert len(samples) > 0 + + # Verify all samples are Sample objects + for sample in samples: + assert isinstance(sample, Sample) + assert isinstance(sample.sample, int) + assert isinstance(sample.time, float) + assert isinstance(sample.values, dict) + + # VCD only records changes, so each sample should have at least one value + assert len(sample.values) > 0 + + # Values should be integers for digital channels + for _channel, value in sample.values.items(): + assert isinstance(value, int) + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_decode_vcd_analog_channels(demo_client): + """Test decoding VCD with analog channels. 
+ + Verifies: + - Analog values are parsed correctly in VCD format + - Timing information is in nanoseconds + """ + from .common import OutputFormat, Sample + + cfg = CaptureConfig( + sample_rate="100kHz", + samples=30, + output_format=OutputFormat.VCD, + channels=["A0", "A1"], # Analog channels + ) + + result = demo_client.capture(cfg) + samples = list(result.decode()) + + assert isinstance(samples, list) + assert len(samples) > 0 + + # Check that samples have analog values + first_sample = samples[0] + assert isinstance(first_sample, Sample) + assert isinstance(first_sample.time, float) + assert len(first_sample.values) > 0 + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_decode_unsupported_format_raises(demo_client): + """Test that decoding unsupported formats raises NotImplementedError.""" + from .common import OutputFormat + + cfg = CaptureConfig( + sample_rate="100kHz", + samples=10, + output_format=OutputFormat.BINARY, + ) + + result = demo_client.capture(cfg) + + # Binary format should not be decodable + with pytest.raises(NotImplementedError): + result.decode() + + +@pytest.mark.skipif(which("sigrok-cli") is None, reason="sigrok-cli not installed") +def test_decode_analog_csv(demo_client): + """Test decoding CSV with analog channels (voltage values). 
+ + Verifies: + - Analog values are parsed as floats + - Timing information is included + """ + from .common import OutputFormat, Sample + + cfg = CaptureConfig( + sample_rate="100kHz", + samples=30, + output_format=OutputFormat.CSV, + channels=["A0", "A1"], # Analog channels + ) + + result = demo_client.capture(cfg) + samples = list(result.decode()) + + assert isinstance(samples, list) + assert len(samples) > 0 + + # Check first sample for analog values + first_sample = samples[0] + assert isinstance(first_sample, Sample) + assert len(first_sample.values) > 0 + + # Analog values should be floats (voltages) + for _channel, value in first_sample.values.items(): + assert isinstance(value, (int, float)) diff --git a/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/vcd.py b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/vcd.py new file mode 100644 index 000000000..cd2551906 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/vcd.py @@ -0,0 +1,225 @@ +"""VCD (Value Change Dump) format parser for sigrok captures.""" + +from __future__ import annotations + +from typing import Iterator + + +def parse_vcd(data: bytes, sample_rate: str) -> Iterator[dict]: + """Parse VCD format to iterator of samples with timing (changes only). + + VCD format only records when signals change, making it efficient for + sparse data. Each sample represents a time point where one or more + signals changed. 
+ + Args: + data: Raw VCD data as bytes + sample_rate: Sample rate string (not used for VCD as it has its own timescale) + + Yields: + Dicts with keys: sample, time (seconds), values + """ + text = data.decode("utf-8") + lines = text.strip().split("\n") + + # Parse VCD header to extract timescale and channel mapping + timescale_multiplier = 1e-9 # Default: 1 unit = 1 ns = 1e-9 seconds + channel_map: dict[str, str] = {} # symbol → channel name + + for line in lines: + line = line.strip() + + # Parse timescale (e.g., "$timescale 1 us $end" means 1 unit = 1000 ns) + if line.startswith("$timescale"): + timescale_multiplier = _parse_timescale(line) + + # Parse variable definitions (e.g., "$var wire 1 ! D0 $end") + if line.startswith("$var"): + parts = line.split() + if len(parts) >= 5: + symbol = parts[3] # e.g., "!" + channel = parts[4] # e.g., "D0" + channel_map[symbol] = channel + + if line == "$enddefinitions $end": + break + + # Parse and yield value changes one by one + sample_idx = 0 + + for line in lines: + line = line.strip() + if not line or line.startswith("$"): + continue + + # Timestamp line (e.g., "#100 1! 0" 1#") + if line.startswith("#"): + sample_data = _parse_vcd_timestamp_line(line, timescale_multiplier, channel_map) + if sample_data is not None: + sample_data["sample"] = sample_idx + yield sample_data + sample_idx += 1 + + +def _parse_timescale(line: str) -> float: + """Parse timescale line and return multiplier to convert to seconds.""" + parts = line.split() + if len(parts) >= 3: + value = parts[1] + unit = parts[2] + # Convert to seconds multiplier + unit_multipliers = {"s": 1.0, "ms": 1e-3, "us": 1e-6, "ns": 1e-9, "ps": 1e-12} + return float(value) * unit_multipliers.get(unit, 1.0) + return 1.0 + + +def _parse_vcd_timestamp_line(line: str, timescale_multiplier: float, channel_map: dict[str, str]) -> dict | None: + """Parse a VCD timestamp line with value changes. + + Args: + line: Line starting with # (e.g., "#100 1! 
0" 1#") + timescale_multiplier: Multiplier to convert time units to seconds + channel_map: Mapping from VCD symbols to channel names + + Returns: + Dict with time (seconds) and values, or None if line is empty + """ + # Split timestamp from values + parts = line.split(maxsplit=1) + time_str = parts[0][1:] # Remove '#' prefix + + # Skip empty time lines + if not time_str: + return None + + time_units = int(time_str) + current_time_s = time_units * timescale_multiplier + current_values: dict[str, int | float] = {} + + # Parse value changes if present on the same line + if len(parts) > 1: + values_str = parts[1] + _parse_vcd_value_changes(values_str, channel_map, current_values) + + # Return sample data if we have values + if current_values: + return {"time": current_time_s, "values": current_values} + + return None + + +def _parse_vcd_value_changes(values_str: str, channel_map: dict[str, str], current_values: dict[str, int | float]): + """Parse value change tokens from a VCD line. + + Modifies current_values dict in place. + + Supports: + - Single-bit: "1!", "0abc" + - Binary: "b11110000 abc" + - Real: "r3.14159 xyz", "r-10.5 !", "r1.23e-5 aa" + """ + i = 0 + while i < len(values_str): + char = values_str[i] + + # Single bit change (e.g., "1!", "0abc" for multi-char identifiers) + if char in "01xzXZ": + symbol, new_i = _extract_symbol(values_str, i + 1) + if symbol in channel_map: + channel = channel_map[symbol] + current_values[channel] = 1 if char == "1" else 0 + i = new_i + + # Binary value (e.g., "b1010 !" or "b1010 abc") + elif char == "b": + value, symbol, new_i = _parse_binary_value(values_str, i, channel_map) + if symbol and value is not None: + current_values[channel_map[symbol]] = value + i = new_i + + # Real (analog) value (e.g., "r3.14 !" 
or "r-10.5 abc") + elif char == "r": + value, symbol, new_i = _parse_real_value(values_str, i, channel_map) + if symbol and value is not None: + current_values[channel_map[symbol]] = value + i = new_i + + # Skip whitespace + elif char == " ": + i += 1 + else: + i += 1 + + +def _extract_symbol(text: str, start: int) -> tuple[str, int]: + """Extract a VCD symbol (can be multi-character) from text. + + Returns: + Tuple of (symbol, next_position) + """ + end = start + while end < len(text) and text[end] != " ": + end += 1 + return text[start:end], end + + +def _parse_binary_value(values_str: str, start: int, channel_map: dict[str, str]) -> tuple[int | None, str | None, int]: + """Parse a binary value like "b1010 abc". + + Returns: + Tuple of (value, symbol, next_position) + """ + # Extract binary value + value_start = start + 1 + value_end = value_start + while value_end < len(values_str) and values_str[value_end] in "01xzXZ": + value_end += 1 + binary_value = values_str[value_start:value_end] + + # Skip whitespace before symbol + while value_end < len(values_str) and values_str[value_end] == " ": + value_end += 1 + + # Extract symbol + symbol, next_pos = _extract_symbol(values_str, value_end) + + if symbol in channel_map: + try: + return int(binary_value, 2), symbol, next_pos + except ValueError: + return 0, symbol, next_pos + + return None, None, next_pos + + +def _parse_real_value(values_str: str, start: int, channel_map: dict[str, str]) -> tuple[float | None, str | None, int]: + """Parse a real (analog) value like "r3.14 abc" or "r-10.5 !". 
+ + Returns: + Tuple of (value, symbol, next_position) + """ + # Extract real value (number with optional sign, decimal, exponent) + value_start = start + 1 + value_end = value_start + while value_end < len(values_str) and values_str[value_end] not in " ": + if values_str[value_end] in "0123456789-.eE+": + value_end += 1 + else: + break + real_value = values_str[value_start:value_end] + + # Skip whitespace before symbol + while value_end < len(values_str) and values_str[value_end] == " ": + value_end += 1 + + # Extract symbol + symbol, next_pos = _extract_symbol(values_str, value_end) + + if symbol in channel_map: + try: + return float(real_value), symbol, next_pos + except ValueError: + return 0.0, symbol, next_pos + + return None, None, next_pos + diff --git a/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/vcd_test.py b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/vcd_test.py new file mode 100644 index 000000000..bf0d829c4 --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/jumpstarter_driver_sigrok/vcd_test.py @@ -0,0 +1,217 @@ +"""Tests for VCD (Value Change Dump) format parser.""" + +from base64 import b64encode + +from .common import CaptureResult, OutputFormat, Sample + + +def test_vcd_parser_comprehensive(): + """Test VCD parser with manually constructed VCD data covering all features. + + This test validates: + - Single-character identifiers (!, ", #) + - Multi-character identifiers (aa, ab, abc) + - Timescale parsing (microseconds to nanoseconds) + - Single-bit values (0/1) + - X/Z state handling + - Binary values (vectors) + - Real (analog) values with various formats + """ + # Construct a comprehensive VCD file + vcd_content = """$date Mon Dec 8 2025 $end +$version Test VCD Generator $end +$timescale 1 us $end +$scope module test $end +$var wire 1 ! 
D0 $end +$var wire 1 " D1 $end +$var wire 1 # D2 $end +$var wire 1 aa CH95 $end +$var wire 1 ab CH96 $end +$var wire 8 abc BUS0 $end +$var real 1 xyz ANALOG0 $end +$upscope $end +$enddefinitions $end +#0 1! 0" 1# 0aa 1ab b00001111 abc r-10.5 xyz +#5 0! 1" x# 1aa +#10 z! 0" 1# b11110000 abc r3.14159 xyz +#25 1! 1" 0# 0aa 0ab b10101010 abc r0.0 xyz +#100 0! 0" 0# r1.23e-5 xyz +""" + + # Create a CaptureResult with this VCD data (no channel mapping for parser test) + result = CaptureResult( + data_b64=b64encode(vcd_content.encode("utf-8")).decode("ascii"), + output_format=OutputFormat.VCD, + sample_rate="1MHz", + channel_map={}, + triggers=None, + decoders=None, + ) + + # Parse the VCD + samples = list(result.decode()) + + # Verify we got the expected number of samples + assert len(samples) == 5 + + # Sample 0 at time 0us = 0s + s0 = samples[0] + assert s0.time == 0.0 + # Channel names come directly from VCD (not mapped) + assert s0.values["D0"] == 1 + assert s0.values["D1"] == 0 + assert s0.values["D2"] == 1 + assert s0.values["CH95"] == 0 # Multi-char identifier "aa" + assert s0.values["CH96"] == 1 # Multi-char identifier "ab" + assert s0.values["BUS0"] == 0b00001111 # Binary value + assert abs(s0.values["ANALOG0"] - (-10.5)) < 0.001 # Real value + + # Sample 1 at time 5us = 0.000005s + s1 = samples[1] + assert abs(s1.time - 0.000005) < 1e-12 + assert s1.values["D0"] == 0 + assert s1.values["D1"] == 1 + assert s1.values["D2"] == 0 # X converted to 0 + assert s1.values["CH95"] == 1 + + # Sample 2 at time 10us = 0.00001s + s2 = samples[2] + assert abs(s2.time - 0.00001) < 1e-12 + assert s2.values["D0"] == 0 # Z converted to 0 + assert s2.values["D1"] == 0 + assert s2.values["D2"] == 1 + assert s2.values["BUS0"] == 0b11110000 + assert abs(s2.values["ANALOG0"] - 3.14159) < 0.001 + + # Sample 3 at time 25us = 0.000025s + s3 = samples[3] + assert abs(s3.time - 0.000025) < 1e-12 + assert s3.values["D0"] == 1 + assert s3.values["D1"] == 1 + assert s3.values["D2"] == 0 + 
assert s3.values["CH95"] == 0 + assert s3.values["CH96"] == 0 + assert s3.values["BUS0"] == 0b10101010 + assert abs(s3.values["ANALOG0"] - 0.0) < 0.001 + + # Sample 4 at time 100us = 0.0001s + s4 = samples[4] + assert abs(s4.time - 0.0001) < 1e-12 + assert s4.values["D0"] == 0 + assert s4.values["D1"] == 0 + assert s4.values["D2"] == 0 + assert abs(s4.values["ANALOG0"] - 1.23e-5) < 1e-10 # Scientific notation + + +def test_vcd_parser_timescale_variations(): + """Test VCD parser with different timescale values.""" + # Test different timescales + test_cases = [ + ("1 ns", 1, 0), # 1ns timescale, time 0 = 0ns + ("1 us", 1000, 0), # 1us timescale, time 0 = 0ns + ("1 ms", 1000000, 0), # 1ms timescale, time 0 = 0ns + ("10 ns", 10, 100 * 10), # 10ns timescale, time 100 = 1000ns + ("100 ns", 100, 50 * 100), # 100ns timescale, time 50 = 5000ns + ] + + for timescale_str, _multiplier, expected_time_ns in test_cases: + vcd_content = f"""$timescale {timescale_str} $end +$var wire 1 ! D0 $end +$enddefinitions $end +#0 1! +#{100 if expected_time_ns else 0} 0! +""" + result = CaptureResult( + data_b64=b64encode(vcd_content.encode("utf-8")).decode("ascii"), + output_format=OutputFormat.VCD, + sample_rate="1MHz", + channel_map={}, + ) + + samples = list(result.decode()) + assert len(samples) >= 1 + # First sample at time 0 + assert samples[0].time == 0.0 + + +def test_vcd_parser_empty_timestamps(): + """Test VCD parser handles empty timestamp lines correctly.""" + vcd_content = """$timescale 1 ns $end +$var wire 1 ! D0 $end +$enddefinitions $end +#0 1! +#10 0! +# +#20 1! 
+""" + + result = CaptureResult( + data_b64=b64encode(vcd_content.encode("utf-8")).decode("ascii"), + output_format=OutputFormat.VCD, + sample_rate="1MHz", + channel_map={}, + ) + + samples = list(result.decode()) + # Should have 3 samples (empty timestamp line skipped) + assert len(samples) == 3 + assert samples[0].time == 0.0 + assert samples[1].time == 1e-8 # 10ns + assert samples[2].time == 2e-8 # 20ns + + +def test_vcd_parser_large_channel_count(): + """Test VCD parser with large channel counts using multi-char identifiers. + + According to libsigrok vcd_identifier(): + - Channels 0-93: Single char (!, ", ..., ~) + - Channels 94-769: Two lowercase letters (aa, ab, ..., zz) + - Channels 770+: Three lowercase letters (aaa, aab, ...) + """ + # Test identifiers at boundaries + vcd_content = """$timescale 1 ns $end +$var wire 1 ! CH0 $end +$var wire 1 ~ CH93 $end +$var wire 1 aa CH94 $end +$var wire 1 ab CH95 $end +$var wire 1 zz CH769 $end +$var wire 1 aaa CH770 $end +$var wire 1 abc CH800 $end +$enddefinitions $end +#0 1! 0~ 1aa 0ab 1zz 0aaa 1abc +#100 0! 1~ 0aa 1ab 0zz 1aaa 0abc +""" + + result = CaptureResult( + data_b64=b64encode(vcd_content.encode("utf-8")).decode("ascii"), + output_format=OutputFormat.VCD, + sample_rate="1MHz", + channel_map={}, + ) + + samples = list(result.decode()) + + # Verify first sample (channel names come directly from VCD) + assert len(samples) == 2 + s0 = samples[0] + assert isinstance(s0, Sample) + assert s0.time == 0.0 + assert s0.values["CH0"] == 1 # Single char: ! 
+ assert s0.values["CH93"] == 0 # Single char: ~ + assert s0.values["CH94"] == 1 # Two char: aa + assert s0.values["CH95"] == 0 # Two char: ab + assert s0.values["CH769"] == 1 # Two char: zz + assert s0.values["CH770"] == 0 # Three char: aaa + assert s0.values["CH800"] == 1 # Three char: abc + + # Verify second sample + s1 = samples[1] + assert abs(s1.time - 1e-7) < 1e-15 # 100ns + assert s1.values["CH0"] == 0 + assert s1.values["CH93"] == 1 + assert s1.values["CH94"] == 0 + assert s1.values["CH95"] == 1 + assert s1.values["CH769"] == 0 + assert s1.values["CH770"] == 1 + assert s1.values["CH800"] == 0 + diff --git a/python/packages/jumpstarter-driver-sigrok/pyproject.toml b/python/packages/jumpstarter-driver-sigrok/pyproject.toml new file mode 100644 index 000000000..58518119b --- /dev/null +++ b/python/packages/jumpstarter-driver-sigrok/pyproject.toml @@ -0,0 +1,42 @@ +[project] +name = "jumpstarter-driver-sigrok" +dynamic = ["version", "urls"] +description = "Jumpstarter driver wrapping sigrok-cli for logic analyzer and oscilloscope support" +readme = "README.md" +license = "Apache-2.0" +authors = [ + { name = "Miguel Angel Ajo Pelayo", email = "miguelangel@ajo.es" } +] +requires-python = ">=3.11" +dependencies = [ + "jumpstarter", +] + +[tool.hatch.version] +source = "vcs" +raw-options = { 'root' = '../../../'} + +[tool.hatch.metadata.hooks.vcs.urls] +Homepage = "https://jumpstarter.dev" +source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}.zip" + +[tool.pytest.ini_options] +addopts = "--cov --cov-report=html --cov-report=xml" +log_cli = true +log_cli_level = "INFO" +testpaths = ["jumpstarter_driver_sigrok"] +asyncio_default_fixture_loop_scope = "function" + +[build-system] +requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] +build-backend = "hatchling.build" + +[tool.hatch.build.hooks.pin_jumpstarter] +name = "pin_jumpstarter" + +[dependency-groups] +dev = [ + "pytest-cov>=6.0.0", + "pytest>=8.3.3", + 
"pytest-asyncio>=0.24.0", +] diff --git a/python/packages/jumpstarter-driver-snmp/pyproject.toml b/python/packages/jumpstarter-driver-snmp/pyproject.toml index 9cb1a54b5..192edcb16 100644 --- a/python/packages/jumpstarter-driver-snmp/pyproject.toml +++ b/python/packages/jumpstarter-driver-snmp/pyproject.toml @@ -37,7 +37,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-ssh-mitm/pyproject.toml b/python/packages/jumpstarter-driver-ssh-mitm/pyproject.toml index 9d24fd79b..4d4ca9f2b 100644 --- a/python/packages/jumpstarter-driver-ssh-mitm/pyproject.toml +++ b/python/packages/jumpstarter-driver-ssh-mitm/pyproject.toml @@ -20,7 +20,7 @@ ssh_mitm = "jumpstarter_driver_ssh_mitm" [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../'} +raw-options = { 'root' = '../../../'} [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-ssh/pyproject.toml b/python/packages/jumpstarter-driver-ssh/pyproject.toml index ee557bc96..e195155a6 100644 --- a/python/packages/jumpstarter-driver-ssh/pyproject.toml +++ b/python/packages/jumpstarter-driver-ssh/pyproject.toml @@ -18,7 +18,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../'} +raw-options = { 'root' = '../../../'} [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-tasmota/pyproject.toml b/python/packages/jumpstarter-driver-tasmota/pyproject.toml index ae32a1193..696d957ca 100644 --- a/python/packages/jumpstarter-driver-tasmota/pyproject.toml +++ b/python/packages/jumpstarter-driver-tasmota/pyproject.toml @@ -19,7 +19,7 @@ TasmotaPower = 
"jumpstarter_driver_tasmota.driver:TasmotaPower" [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-tftp/pyproject.toml b/python/packages/jumpstarter-driver-tftp/pyproject.toml index bb3429a7b..ddf802dac 100644 --- a/python/packages/jumpstarter-driver-tftp/pyproject.toml +++ b/python/packages/jumpstarter-driver-tftp/pyproject.toml @@ -25,7 +25,7 @@ dev = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-tmt/pyproject.toml b/python/packages/jumpstarter-driver-tmt/pyproject.toml index 30278746a..6db0f89d1 100644 --- a/python/packages/jumpstarter-driver-tmt/pyproject.toml +++ b/python/packages/jumpstarter-driver-tmt/pyproject.toml @@ -17,7 +17,7 @@ dependencies = [ [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../'} +raw-options = { 'root' = '../../../'} [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-uboot/pyproject.toml b/python/packages/jumpstarter-driver-uboot/pyproject.toml index 39f66c289..e08af3621 100644 --- a/python/packages/jumpstarter-driver-uboot/pyproject.toml +++ b/python/packages/jumpstarter-driver-uboot/pyproject.toml @@ -28,7 +28,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.uv.sources] jumpstarter-driver-composite = { workspace = true } diff --git a/python/packages/jumpstarter-driver-ustreamer/pyproject.toml b/python/packages/jumpstarter-driver-ustreamer/pyproject.toml index ed239575f..aeae72844 100644 --- a/python/packages/jumpstarter-driver-ustreamer/pyproject.toml +++ b/python/packages/jumpstarter-driver-ustreamer/pyproject.toml @@ -21,7 +21,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-driver-vnc/pyproject.toml b/python/packages/jumpstarter-driver-vnc/pyproject.toml index 49ce3545f..93aabbcdc 100644 --- a/python/packages/jumpstarter-driver-vnc/pyproject.toml +++ b/python/packages/jumpstarter-driver-vnc/pyproject.toml @@ -21,7 +21,7 @@ vnc = "jumpstarter_driver_vnc.driver:Vnc" [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../'} +raw-options = { 'root' = '../../../'} [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-driver-yepkit/pyproject.toml b/python/packages/jumpstarter-driver-yepkit/pyproject.toml index 13607a5d9..a90ae2df7 100644 --- a/python/packages/jumpstarter-driver-yepkit/pyproject.toml +++ b/python/packages/jumpstarter-driver-yepkit/pyproject.toml @@ -19,7 +19,7 @@ Ykush = "jumpstarter_driver_yepkit.driver:Ykush" [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [tool.hatch.metadata.hooks.vcs.urls] Homepage = "https://jumpstarter.dev" diff --git a/python/packages/jumpstarter-imagehash/pyproject.toml b/python/packages/jumpstarter-imagehash/pyproject.toml index 92e13e671..3227bfd07 100644 --- 
a/python/packages/jumpstarter-imagehash/pyproject.toml +++ b/python/packages/jumpstarter-imagehash/pyproject.toml @@ -21,7 +21,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-kubernetes/pyproject.toml b/python/packages/jumpstarter-kubernetes/pyproject.toml index 39335b9c0..db3a20d62 100644 --- a/python/packages/jumpstarter-kubernetes/pyproject.toml +++ b/python/packages/jumpstarter-kubernetes/pyproject.toml @@ -30,7 +30,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-protocol/pyproject.toml b/python/packages/jumpstarter-protocol/pyproject.toml index 4eaa6fa9d..2cbf7e6a8 100644 --- a/python/packages/jumpstarter-protocol/pyproject.toml +++ b/python/packages/jumpstarter-protocol/pyproject.toml @@ -29,7 +29,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter-testing/pyproject.toml b/python/packages/jumpstarter-testing/pyproject.toml index 6b3f90ace..2b5f84e02 100644 --- a/python/packages/jumpstarter-testing/pyproject.toml +++ b/python/packages/jumpstarter-testing/pyproject.toml @@ -23,7 +23,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. 
[tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/packages/jumpstarter/pyproject.toml b/python/packages/jumpstarter/pyproject.toml index 61e54487d..a09ce03f2 100644 --- a/python/packages/jumpstarter/pyproject.toml +++ b/python/packages/jumpstarter/pyproject.toml @@ -44,7 +44,7 @@ source_archive = "https://github.com/jumpstarter-dev/repo/archive/{commit_hash}. [tool.hatch.version] source = "vcs" -raw-options = { 'root' = '../../' } +raw-options = { 'root' = '../../../' } [build-system] requires = ["hatchling", "hatch-vcs", "hatch-pin-jumpstarter"] diff --git a/python/uv.lock b/python/uv.lock index 77b11a509..6087525db 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -31,6 +31,7 @@ members = [ "jumpstarter-driver-ridesx", "jumpstarter-driver-sdwire", "jumpstarter-driver-shell", + "jumpstarter-driver-sigrok", "jumpstarter-driver-snmp", "jumpstarter-driver-ssh", "jumpstarter-driver-ssh-mitm", @@ -2076,6 +2077,30 @@ dev = [ { name = "pytest-cov", specifier = ">=6.0.0" }, ] +[[package]] +name = "jumpstarter-driver-sigrok" +source = { editable = "packages/jumpstarter-driver-sigrok" } +dependencies = [ + { name = "jumpstarter" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "pytest-cov" }, +] + +[package.metadata] +requires-dist = [{ name = "jumpstarter", editable = "packages/jumpstarter" }] + +[package.metadata.requires-dev] +dev = [ + { name = "pytest", specifier = ">=8.3.3" }, + { name = "pytest-asyncio", specifier = ">=0.24.0" }, + { name = "pytest-cov", specifier = ">=6.0.0" }, +] + [[package]] name = "jumpstarter-driver-snmp" source = { editable = "packages/jumpstarter-driver-snmp" } diff --git a/typos.toml b/typos.toml new file mode 100644 index 000000000..3cc13976e --- /dev/null +++ b/typos.toml @@ -0,0 +1,33 @@ +# Typos configuration for 
Jumpstarter monorepo +# https://github.com/crate-ci/typos + +[default] +extend-ignore-re = [ + # Ignore hash strings (like 321ba1) + "[a-f0-9]{6,}", +] + +[default.extend-words] +# ANDed/ORed are valid technical terms (combined with AND/OR operations) +ANDed = "ANDed" +Ded = "Ded" # suffix of ANDed in generated CRD docs +ORed = "ORed" + +# mosquitto is the name of an MQTT broker, not a typo of "mosquito" +mosquitto = "mosquitto" + +# ser is short for "serialize" in variable names like ser_json_timedelta +ser = "ser" + +[type.gomod] +# Exclude go.mod and go.sum from spell checking +extend-glob = ["go.mod", "go.sum"] +check-file = false + +[files] +extend-exclude = [ + # Generated files that shouldn't be spell-checked + "*.lock", + # Vendored dependencies + "vendor/", +]