diff --git a/.github/actions/configure-k8s-version/action.yaml b/.github/actions/configure-k8s-version/action.yaml new file mode 100644 index 0000000000..168bf3a238 --- /dev/null +++ b/.github/actions/configure-k8s-version/action.yaml @@ -0,0 +1,25 @@ +name: Configure Kubernetes version +description: Update kubernetes.json with the specified Kubernetes version + +inputs: + kubernetes_version: + description: 'Kubernetes version (e.g., 1.31.1)' + required: true + +runs: + using: composite + steps: + - name: Configure Kubernetes version + working-directory: images/capi/packer/config + shell: bash + env: + KUBERNETES_VERSION: ${{ inputs.kubernetes_version }} + run: | + set -euo pipefail + KUBERNETES_RELEASE=$(echo "${KUBERNETES_VERSION}" | cut -d "." -f -2) + sed -i "s/^ \"kubernetes_series\".*/ \"kubernetes_series\": \"v${KUBERNETES_RELEASE}\",/g" kubernetes.json + sed -i "s/^ \"kubernetes_semver\".*/ \"kubernetes_semver\": \"v${KUBERNETES_VERSION}\",/g" kubernetes.json + sed -i "s/^ \"kubernetes_rpm_version\".*/ \"kubernetes_rpm_version\": \"${KUBERNETES_VERSION}\",/g" kubernetes.json + sed -i "s/^ \"kubernetes_deb_version\".*/ \"kubernetes_deb_version\": \"${KUBERNETES_VERSION}-1.1\",/g" kubernetes.json + grep -q "v${KUBERNETES_VERSION}" kubernetes.json || { echo 'ERROR: kubernetes version not set in kubernetes.json'; exit 1; } + cat kubernetes.json diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..0abcead57b --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,24 @@ +# Please see the documentation for all configuration options: https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + +# github-actions +- directory: "/" + package-ecosystem: "github-actions" + open-pull-requests-limit: 5 + schedule: + interval: "weekly" + time: "09:00" + # Use America/New_York Standard Time (UTC -05:00) + timezone: "America/New_York" + groups: + 
all-github-actions: + patterns: [ "*" ] + commit-message: + prefix: "dependabot" + include: scope + labels: + - "ok-to-test" + - "kind/cleanup" + - "release-note-none" diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 0000000000..4e2f46e05a --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,165 @@ +# Azure SIG Image Builder - GitHub Actions Workflows + +This directory contains GitHub Actions workflows for building and publishing Azure Shared Image Gallery (SIG) images using the image-builder project. These workflows are the GitHub Actions equivalent of the Azure DevOps pipelines in `images/capi/packer/azure/.pipelines/`. + +## Workflow Overview + +The entire pipeline is defined in a single workflow file, `build-azure-sig.yaml`, which contains all stages as separate jobs: + +## Pipeline Stages + +``` +┌─────────┐ ┌──────────┐ ┌─────────────┐ ┌─────────┐ +│ Build │───▶│ Test │───▶│ Promote │───▶│ Clean │ +└─────────┘ └──────────┘ └─────────────┘ └─────────┘ + (optional) (requires approval) (if build succeeded) +``` + +1. **Build**: Builds the Kubernetes node image using Packer and publishes it to a staging Azure Compute Gallery +2. **Test**: (Optional) Creates a test CAPI cluster using the built image to validate it works correctly +3. **Promote**: (Requires approval) Promotes the image from staging to the community gallery for public access +4. **Clean**: Cleans up staging resources (managed image and staging gallery version) — only runs if the build succeeded + +## Usage + +### Triggering the Workflow + +1. Go to the **Actions** tab in the GitHub repository +2. Select **Build Azure SIG Image** from the workflows list +3. Click **Run workflow** +4. 
Fill in the required inputs: + +| Input | Required | Description | Example | +|-------|----------|-------------|---------| +| `kubernetes_version` | Yes | Kubernetes version to build | `1.31.1` | +| `os` | Yes | Operating system | `Ubuntu`, `AzureLinux`, or `Windows` | +| `os_version` | Yes | OS version | `24.04`, `22.04`, `2022-containerd` | +| `resource_group` | No | Azure resource group | `cluster-api-gallery` | +| `staging_gallery_name` | No | Staging gallery name | `staging_gallery` | +| `gallery_name` | No | Community gallery name | `community_gallery` | +| `packer_flags` | No | Additional Packer flags | `--on-error=abort` | +| `tags` | No | Custom tags for the image | `env=prod team=infra` | +| `skip_test` | No | Skip the test stage | `false` (default) | +| `skip_promote` | No | Skip the promote stage | `false` | + +### Supported OS and Version Combinations + +| OS | Versions | +|----|----------| +| Ubuntu | `22.04`, `24.04` | +| AzureLinux | `3` | +| Windows | `2022-containerd`, `2025-containerd` | + +## Setup Requirements + +### 1. Azure OIDC Authentication + +Configure Azure OIDC (OpenID Connect) authentication for passwordless authentication from GitHub Actions: + +1. Create an Azure AD application and service principal +2. Configure federated credentials for the GitHub repository +3. Grant the service principal necessary permissions on your Azure subscription + +Add the following secrets to your GitHub repository or organization: + +| Secret | Description | +|--------|-------------| +| `AZURE_CLIENT_ID` | Azure AD application (client) ID | +| `AZURE_TENANT_ID` | Azure AD tenant ID | +| `AZURE_SUBSCRIPTION_ID` | Azure subscription ID | + +For detailed instructions, see: [Azure Login with OIDC](https://github.com/azure/login#login-with-openid-connect-oidc-recommended) + +### 2. GitHub Environment for Approvals + +Create a GitHub Environment for the promotion approval gate: + +1. Go to **Settings** → **Environments** +2. 
Create a new environment named `image-promotion-approval` +3. Enable **Required reviewers** and add the appropriate team members +4. Optionally configure deployment branches and wait timer + +### 3. Repository/Organization Variables + +Set the following variables in your repository or organization settings for the promotion stage: + +| Variable | Description | Example | +|----------|-------------|---------| +| `EULA_LINK` | URL to the EULA for the image | `https://example.com/eula` | +| `PUBLISHER_EMAIL` | Email for the image publisher | `team@example.com` | +| `PUBLISHER_URI` | URI for the image publisher | `https://example.com` | +| `SIG_PUBLISHER` | Publisher name for image definitions | `MyOrganization` | + +### 4. Azure Resources + +Ensure the following Azure resources are set up: + +- **Resource Group**: A resource group for the compute galleries (default: `cluster-api-gallery`) +- **Staging Gallery**: An Azure Compute Gallery for initial image publishing +- **Community Gallery**: An Azure Compute Gallery with community permissions for public access + +The workflows will create these resources if they don't exist, provided the service principal has sufficient permissions. 
+ +### Required Azure RBAC Permissions + +The service principal needs the following permissions: + +- `Contributor` on the resource group (or subscription) +- `User Access Administrator` if creating new resource groups +- For community galleries: permissions to create and manage Shared Image Galleries + +## Artifacts + +The workflows produce the following artifacts: + +| Artifact | Description | Retention | +|----------|-------------|-----------| +| `publishing-info` | JSON file with image metadata from the build stage | 7 days | +| `sig-publishing` | JSON file with community gallery publishing details | 30 days | + +## Differences from Azure DevOps Pipelines + +| Feature | Azure DevOps | GitHub Actions | +|---------|--------------|----------------| +| Authentication | Service Connection | Azure OIDC via `azure/login@v2` | +| Approvals | ADO Environments | GitHub Environments | +| Artifacts | Pipeline Artifacts | GitHub Actions Artifacts | +| Variables | Pipeline Variables | Workflow Inputs + Repository Variables | +| Templates | YAML Templates | Jobs within a single workflow | + +## Troubleshooting + +### Common Issues + +1. **Authentication failures** + - Verify OIDC credentials are correctly configured + - Check that the federated credential matches the repository and branch + +2. **Permission denied errors** + - Ensure the service principal has sufficient Azure RBAC permissions + - Verify the subscription ID is correct + +3. **Packer build failures** + - Check the Packer output in the build logs + - Verify the OS/version combination is supported + - Ensure the Kubernetes version exists + +4. **Test stage failures** + - The test stage requires the Azure CAPI CLI extension + - Ensure sufficient quota for VMs in the target region + +### Debug Mode + +To enable debug output, add `--on-error=abort` to the `packer_flags` input to preserve the Packer VM on failure for investigation. 
> **Warning:** Do **not** use `--on-error=ask` — it will cause the workflow to hang indefinitely waiting for interactive input, consuming the entire job timeout. + +For more verbose Packer logging, set the `packer_debug` workflow input to `true` (which sets `PACKER_LOG=1`); for GitHub Actions debug logging, set the `ACTIONS_STEP_DEBUG` secret to `true`. + +## Related Documentation + +- [Image Builder Documentation](../../docs/book/src/capi/capi.md) +- [Azure Provider Documentation](../../images/capi/packer/azure/README.md) +- [Azure DevOps Pipelines](../../images/capi/packer/azure/.pipelines/) +- [GitHub Actions Documentation](https://docs.github.com/en/actions) +- [Azure Login Action](https://github.com/azure/login) diff --git a/.github/workflows/build-azure-sig.yaml b/.github/workflows/build-azure-sig.yaml new file mode 100644 index 0000000000..1c86351907 --- /dev/null +++ b/.github/workflows/build-azure-sig.yaml @@ -0,0 +1,859 @@ +# GitHub Actions workflow for building and publishing Azure Shared Image Gallery (SIG) images +# +# Required secrets: +# - AZURE_CLIENT_ID - Azure service principal client ID (for OIDC authentication) +# - AZURE_TENANT_ID - Azure tenant ID +# - AZURE_SUBSCRIPTION_ID - Azure subscription ID +# +# Required environment variables (set in GitHub repository or organization settings): +# - EULA_LINK - the URL to the EULA for the image (for promote stage) +# - PUBLISHER_EMAIL - the email for the image publisher (for promote stage) +# - PUBLISHER_URI - the URI for the image publisher (for promote stage) +# - SIG_PUBLISHER - the publisher for the image definition (for promote stage) +# +# Required inputs: +# - kubernetes_version - version of Kubernetes to build the image with, e.g. 
`1.31.1` +# - os - operating system distro, such as 'Ubuntu', 'AzureLinux', or 'Windows' +# - os_version - version of distro, such as `24.04` or `2022-containerd` +# +# Optional inputs: +# - resource_group - name of the Azure resource group to use for the compute galleries +# - staging_gallery_name - name of the Azure compute gallery for initial image publishing +# - gallery_name - name of the Azure community gallery for final image publishing +# - packer_flags - additional flags to pass to packer +# - tags - tags to apply to the image +# - replicated_regions - space-separated list of Azure regions to replicate the image to +# - skip_test - skip the test stage +# - skip_promote - skip the promote stage +# - packer_debug - enable Packer debug logging (sets PACKER_LOG=1) + +name: Build Azure SIG Image + +on: + workflow_dispatch: + inputs: + kubernetes_version: + description: 'Kubernetes version (e.g., 1.31.1)' + required: true + type: string + os: + description: 'Operating system (Ubuntu, AzureLinux, Windows)' + required: true + type: choice + options: + - Ubuntu + - AzureLinux + - Windows + os_version: + description: 'OS version (e.g., 24.04, 2022-containerd)' + required: true + type: string + resource_group: + description: 'Azure resource group name' + required: false + type: string + default: 'cluster-api-gallery' + staging_gallery_name: + description: 'Staging gallery name' + required: false + type: string + default: 'staging_gallery' + gallery_name: + description: 'Community gallery name' + required: false + type: string + default: 'community_gallery' + packer_flags: + description: 'Additional Packer flags' + required: false + type: string + default: '' + tags: + description: 'Tags to apply to the image' + required: false + type: string + default: '' + azure_location: + description: 'Azure region for image build (must match community gallery location and have VM quota for testing)' + required: false + type: choice + options: + - northcentralus + - canadacentral 
+ - switzerlandnorth + - australiaeast + - francecentral + - germanywestcentral + - uksouth + default: 'northcentralus' + replicated_regions: + description: 'Space-separated Azure regions to replicate the image to (image build region is always included)' + required: false + type: string + default: 'australiaeast canadacentral eastus eastus2 francecentral germanywestcentral northcentralus northeurope switzerlandnorth uksouth westeurope westus2' + skip_test: + description: 'Skip the test stage' + required: false + type: boolean + default: false + skip_promote: + description: 'Skip the promote stage (requires manual approval)' + required: false + type: boolean + default: false + packer_debug: + description: 'Enable Packer debug logging (PACKER_LOG=1)' + required: false + type: boolean + default: false + +permissions: + id-token: write + contents: read + +jobs: + # --------------------------------------------------------------------------- + # Build + # --------------------------------------------------------------------------- + build: + name: Build SIG Image + runs-on: ubuntu-latest + timeout-minutes: 120 + env: + KUBERNETES_VERSION: ${{ inputs.kubernetes_version }} + OS: ${{ inputs.os }} + OS_VERSION: ${{ inputs.os_version }} + RESOURCE_GROUP: ${{ inputs.resource_group }} + STAGING_GALLERY_NAME: ${{ inputs.staging_gallery_name }} + AZURE_LOCATION: ${{ inputs.azure_location }} + PACKER_FLAGS: ${{ inputs.packer_flags }} + TAGS_INPUT: ${{ inputs.tags }} + PACKER_LOG: ${{ inputs.packer_debug && '1' || '0' }} + + steps: + - name: Checkout repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Configure Kubernetes version + uses: ./.github/actions/configure-k8s-version + with: + kubernetes_version: ${{ inputs.kubernetes_version }} + + - name: Check for Windows kube-proxy image + if: inputs.os == 'Windows' + run: | + set -euo pipefail + IMAGE="sigwindowstools/kube-proxy" + TAG="v${KUBERNETES_VERSION/+/_}-calico-hostprocess" + echo 
"Checking for Windows kube-proxy image ${IMAGE}:${TAG}" + + # Use the Docker Hub Registry v2 API to verify the tag exists. + TOKEN=$(curl -fsSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:${IMAGE}:pull" | jq -r .token) + HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" \ + -H "Authorization: Bearer ${TOKEN}" \ + -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \ + -H "Accept: application/vnd.docker.distribution.manifest.list.v2+json" \ + "https://registry-1.docker.io/v2/${IMAGE}/manifests/${TAG}") + + if [[ "${HTTP_STATUS}" != "200" ]]; then + echo "kube-proxy image ${IMAGE}:${TAG} not found (HTTP ${HTTP_STATUS})" + exit 1 + fi + echo "kube-proxy image ${IMAGE}:${TAG} exists" + + - name: Azure Login + uses: azure/login@532459ea530d8321f2fb9bb10d1e0bcf23869a43 # v3.0.0 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Setup Python + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 + with: + python-version: '3.12' + + - name: Install dependencies + working-directory: images/capi + env: + PACKER_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + pip install ansible ansible-lint + make deps-azure + + - name: Build SIG Image + id: build + working-directory: images/capi + run: | + set -euo pipefail + + os=$(echo "${OS}" | tr '[:upper:]' '[:lower:]') + version=$(echo "${OS_VERSION}" | tr '[:upper:]' '[:lower:]' | tr -d .) 
+ + export RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" + export RESOURCE_GROUP_NAME="${RESOURCE_GROUP}" + + # timestamp is in RFC-3339 format to match kubetest + export TIMESTAMP="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" + export JOB_NAME="${JOB_NAME:-"image-builder-sig-${os}-${version}"}" + + if [[ -n "${TAGS_INPUT}" ]]; then + export TAGS="${TAGS_INPUT}" + else + export TAGS="creationTimestamp=${TIMESTAMP} jobName=${JOB_NAME} DO-NOT-DELETE=UpstreamInfra" + fi + printf '%s' "${TAGS}" | tee packer/azure/tags.out + + export GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" + DISTRO="${os}-${version}" + echo "DISTRO=${DISTRO}" >> $GITHUB_ENV + + export PACKER_FLAGS="${PACKER_FLAGS} --var sig_image_version=${KUBERNETES_VERSION}" + export PATH=$PATH:$HOME/.local/bin + export USE_AZURE_CLI_AUTH="True" + + make build-azure-sig-${os}-${version} | tee packer/azure/packer.out + + - name: Generate SIG publishing info + id: publishing_info + working-directory: images/capi + run: | + set -euo pipefail + + PACKER_OUTPUT=packer/azure/packer.out + OS_TYPE=$(sed -n 's/^OSType: \(.*\)/\1/p' $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(sed -n "s/^ManagedImageResourceGroupName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_NAME=$(sed -n "s/^ManagedImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_ID=$(sed -n "s/^ManagedImageId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_LOCATION=$(sed -n "s/^ManagedImageLocation: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(sed -n "s/^ManagedImageSharedImageGalleryId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(sed -n "s/^SharedImageGalleryResourceGroup: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_NAME=$(sed -n "s/^SharedImageGalleryName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_IMAGE_NAME=$(sed -n "s/^SharedImageGalleryImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + 
SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(sed -n "s/^SharedImageGalleryImageVersion: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + TAGS=$(cat packer/azure/tags.out) + + if [[ ${SHARED_IMAGE_GALLERY_IMAGE_NAME} == *gen2 ]]; then + HYPERV_GEN="V2" + else + HYPERV_GEN="V1" + fi + + # Create JSON and output it + PUBLISHING_INFO=$(jq -n \ + --arg distro "${DISTRO}" \ + --arg hyperv_gen "${HYPERV_GEN}" \ + --arg os_type "${OS_TYPE}" \ + --arg managed_image_resource_group_name "${MANAGED_IMAGE_RESOURCE_GROUP_NAME}" \ + --arg managed_image_name "${MANAGED_IMAGE_NAME}" \ + --arg managed_image_id "${MANAGED_IMAGE_ID}" \ + --arg managed_image_location "${MANAGED_IMAGE_LOCATION}" \ + --arg managed_image_shared_image_gallery_id "${MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID}" \ + --arg shared_image_gallery_resource_group "${SHARED_IMAGE_GALLERY_RESOURCE_GROUP}" \ + --arg shared_image_gallery_name "${SHARED_IMAGE_GALLERY_NAME}" \ + --arg shared_image_gallery_image_name "${SHARED_IMAGE_GALLERY_IMAGE_NAME}" \ + --arg shared_image_gallery_image_version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" \ + --arg tags "${TAGS}" \ + '$ARGS.named') + + echo "Publishing info: ${PUBLISHING_INFO}" + + # Save to file for artifact + echo "${PUBLISHING_INFO}" > packer/azure/sig-publishing-info.json + + - name: Upload publishing info artifact + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: publishing-info + path: images/capi/packer/azure/sig-publishing-info.json + retention-days: 7 + + # --------------------------------------------------------------------------- + # Test + # --------------------------------------------------------------------------- + # Boots a single VM from the image built in the previous stage and verifies + # that the key Kubernetes components (kubelet, kubeadm, containerd/dockerd) + # are present and report the expected version. For Linux images, the VM is + # accessed via SSH; for Windows images, via az vm run-command. 
+ # --------------------------------------------------------------------------- + test: + name: Test SIG Image + needs: build + if: ${{ !inputs.skip_test }} + runs-on: ubuntu-latest + timeout-minutes: 15 + env: + KUBERNETES_VERSION: ${{ inputs.kubernetes_version }} + + steps: + - name: Checkout repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Download publishing info artifact + uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 + with: + name: publishing-info + path: images/capi/packer/azure/sig/ + + - name: Import variables from build + id: vars + run: | + set -euo pipefail + + PUBLISHING_INFO=$(jq -c . images/capi/packer/azure/sig/sig-publishing-info.json) + echo "PUBLISHING_INFO=${PUBLISHING_INFO}" + + echo "OS_TYPE=$(echo "$PUBLISHING_INFO" | jq -r .os_type)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_id)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_LOCATION=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_location)" >> $GITHUB_OUTPUT + echo "TAGS=$(echo "$PUBLISHING_INFO" | jq -r .tags)" >> $GITHUB_OUTPUT + + # Read expected containerd version from the packer config + CONTAINERD_VERSION=$(jq -r .containerd_version images/capi/packer/config/containerd.json) + echo "CONTAINERD_VERSION=${CONTAINERD_VERSION}" + echo "CONTAINERD_VERSION=${CONTAINERD_VERSION}" >> $GITHUB_OUTPUT + + - name: Azure Login + uses: azure/login@532459ea530d8321f2fb9bb10d1e0bcf23869a43 # v3.0.0 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Create test resource group + env: + MANAGED_IMAGE_LOCATION: ${{ steps.vars.outputs.MANAGED_IMAGE_LOCATION }} + TAGS: ${{ steps.vars.outputs.TAGS }} + run: | + set -euo pipefail + TEST_RESOURCE_GROUP="image-builder-test-${GITHUB_RUN_ID}" + echo "TEST_RESOURCE_GROUP=${TEST_RESOURCE_GROUP}" >> $GITHUB_ENV + az group 
create \ + -n "${TEST_RESOURCE_GROUP}" \ + -l "${MANAGED_IMAGE_LOCATION}" \ + --tags ${TAGS:-} + + - name: Create test VM + id: vm + env: + MANAGED_IMAGE_ID: ${{ steps.vars.outputs.MANAGED_IMAGE_ID }} + MANAGED_IMAGE_LOCATION: ${{ steps.vars.outputs.MANAGED_IMAGE_LOCATION }} + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + TAGS: ${{ steps.vars.outputs.TAGS }} + run: | + set -euo pipefail + + VM_NAME="image-test-vm" + echo "VM_NAME=${VM_NAME}" >> $GITHUB_ENV + + # Generate SSH key for Linux VMs + TMPDIR=$(mktemp -d) + echo "TMPDIR=${TMPDIR}" >> $GITHUB_ENV + ssh-keygen -t rsa -b 2048 -f "${TMPDIR}/sshkey" -N "" -q + + create_args=( + --resource-group "${TEST_RESOURCE_GROUP}" + --name "${VM_NAME}" + --image "${MANAGED_IMAGE_ID}" + --size "Standard_D2s_v3" + --location "${MANAGED_IMAGE_LOCATION}" + --tags ${TAGS:-} + ) + + if [[ "${OS_TYPE}" == "Windows" ]]; then + create_args+=( + --admin-username "capi" + --admin-password "Capi\$Test$(date +%s | tail -c 6)!" + ) + else + create_args+=( + --admin-username "capi" + --ssh-key-values "${TMPDIR}/sshkey.pub" + --authentication-type ssh + ) + fi + + echo "Creating test VM '${VM_NAME}' from image …" + az vm create "${create_args[@]}" \ + --public-ip-sku Standard \ + --output json | tee "${TMPDIR}/vm-create.json" + + PUBLIC_IP=$(jq -r '.publicIpAddress' "${TMPDIR}/vm-create.json") + echo "VM public IP: ${PUBLIC_IP}" + echo "PUBLIC_IP=${PUBLIC_IP}" >> $GITHUB_ENV + + - name: Wait for VM to be ready + env: + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + run: | + set -euo pipefail + + if [[ "${OS_TYPE}" == "Windows" ]]; then + echo "Waiting for Windows VM agent to be ready …" + timeout 300 bash -c " + until az vm get-instance-view \ + --resource-group '${TEST_RESOURCE_GROUP}' \ + --name '${VM_NAME}' \ + --query 'instanceView.vmAgent.statuses[0].displayStatus' \ + -o tsv 2>/dev/null | grep -q 'Ready'; do + sleep 10 + done + " + else + echo "Waiting for SSH to become available …" + timeout 300 bash -c " + until ssh -o 
StrictHostKeyChecking=no -o ConnectTimeout=5 \ + -i "${TMPDIR}/sshkey" capi@'${PUBLIC_IP}' 'echo ready' 2>/dev/null; do + sleep 10 + done + " + fi + + echo "VM is ready" + + - name: Run smoke tests + env: + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + CONTAINERD_VERSION: ${{ steps.vars.outputs.CONTAINERD_VERSION }} + run: | + set -euo pipefail + + EXPECTED_K8S="v${KUBERNETES_VERSION}" + FAILURES=0 + + if [[ "${OS_TYPE}" == "Windows" ]]; then + echo "::group::Windows smoke tests" + + az vm run-command invoke \ + --resource-group "${TEST_RESOURCE_GROUP}" \ + --name "${VM_NAME}" \ + --command-id RunPowerShellScript \ + --scripts ' + $ErrorActionPreference = "Stop" + Write-Output "=== kubelet ===" + & "C:\k\kubelet.exe" --version + Write-Output "=== kubeadm ===" + & "C:\k\kubeadm.exe" version -o short + Write-Output "=== kubectl ===" + & "C:\k\kubectl.exe" version --client -o yaml + Write-Output "=== containerd ===" + & "C:\Program Files\containerd\containerd.exe" --version + Write-Output "=== crictl ===" + & "C:\k\crictl.exe" version + Write-Output "=== containerd service ===" + Get-Service containerd | Format-Table -AutoSize + Write-Output "=== System info ===" + Get-ComputerInfo | Select-Object WindowsProductName, OsVersion, OsArchitecture + ' --output json | tee /tmp/smoke-output.json + + OUTPUT=$(jq -r '.value[0].message' /tmp/smoke-output.json) + echo "${OUTPUT}" + + echo "::endgroup::" + + # Verify Kubernetes version + if echo "${OUTPUT}" | grep -q "${EXPECTED_K8S}"; then + echo "✓ Kubernetes version ${EXPECTED_K8S} confirmed" + else + echo "::error::Expected Kubernetes version ${EXPECTED_K8S} not found in output" + FAILURES=$((FAILURES + 1)) + fi + + # Verify containerd version + if echo "${OUTPUT}" | grep -q "${CONTAINERD_VERSION}"; then + echo "✓ containerd version ${CONTAINERD_VERSION} confirmed" + else + echo "::error::Expected containerd version ${CONTAINERD_VERSION} not found in output" + FAILURES=$((FAILURES + 1)) + fi + + else + SSH_OPTS="-o 
StrictHostKeyChecking=no -o ConnectTimeout=10 -i ${TMPDIR}/sshkey" + SSH_CMD="ssh ${SSH_OPTS} capi@${PUBLIC_IP}" + + echo "::group::Linux smoke tests — version checks" + + echo "--- OS info ---" + ${SSH_CMD} 'cat /etc/os-release' || true + + echo "--- kubelet version ---" + KUBELET_VERSION=$(${SSH_CMD} 'kubelet --version') + echo "${KUBELET_VERSION}" + + echo "--- kubeadm version ---" + KUBEADM_VERSION=$(${SSH_CMD} 'kubeadm version -o short') + echo "${KUBEADM_VERSION}" + + echo "--- kubectl version ---" + ${SSH_CMD} 'kubectl version --client -o yaml 2>/dev/null || kubectl version --client' || true + + echo "--- containerd version ---" + ACTUAL_CONTAINERD=$(${SSH_CMD} 'containerd --version') + echo "${ACTUAL_CONTAINERD}" + + echo "--- crictl version ---" + ${SSH_CMD} 'sudo crictl version' || true + + echo "::endgroup::" + + echo "::group::Linux smoke tests — post-boot validation" + + echo "--- cloud-init status ---" + CLOUD_INIT_STATUS=$(${SSH_CMD} 'cloud-init status 2>/dev/null || echo "not available"') + echo "${CLOUD_INIT_STATUS}" + + echo "--- systemd service states ---" + ${SSH_CMD} 'systemctl is-enabled kubelet containerd' || true + ${SSH_CMD} 'systemctl is-active containerd' || true + + echo "--- containerd runtime ready (crictl info) ---" + ${SSH_CMD} 'sudo crictl info' || true + + echo "--- pre-pulled container images ---" + ${SSH_CMD} "sudo crictl images" || true + + echo "--- kubeadm init dry-run ---" + if ${SSH_CMD} "sudo kubeadm init --dry-run --kubernetes-version '${EXPECTED_K8S}'" 2>&1; then + echo "✓ kubeadm init dry-run succeeded" + else + echo "::error::kubeadm init --dry-run failed" + FAILURES=$((FAILURES + 1)) + fi + + echo "::endgroup::" + + # Verify Kubernetes version + if echo "${KUBELET_VERSION}" | grep -q "${EXPECTED_K8S}"; then + echo "✓ Kubernetes version ${EXPECTED_K8S} confirmed (kubelet)" + else + echo "::error::Expected kubelet version ${EXPECTED_K8S} but got: ${KUBELET_VERSION}" + FAILURES=$((FAILURES + 1)) + fi + + if echo 
"${KUBEADM_VERSION}" | grep -q "${EXPECTED_K8S}"; then + echo "✓ Kubernetes version ${EXPECTED_K8S} confirmed (kubeadm)" + else + echo "::error::Expected kubeadm version ${EXPECTED_K8S} but got: ${KUBEADM_VERSION}" + FAILURES=$((FAILURES + 1)) + fi + + # Verify containerd version + if echo "${ACTUAL_CONTAINERD}" | grep -q "${CONTAINERD_VERSION}"; then + echo "✓ containerd version ${CONTAINERD_VERSION} confirmed" + else + echo "::error::Expected containerd version ${CONTAINERD_VERSION} but got: ${ACTUAL_CONTAINERD}" + FAILURES=$((FAILURES + 1)) + fi + + # Verify cloud-init completed (not applicable to all distros) + if echo "${CLOUD_INIT_STATUS}" | grep -q "done\|disabled\|not available"; then + echo "✓ cloud-init status OK" + else + echo "::warning::cloud-init status: ${CLOUD_INIT_STATUS}" + fi + fi + + if [[ "${FAILURES}" -gt 0 ]]; then + echo "::error::${FAILURES} smoke test(s) failed" + exit 1 + fi + echo "All smoke tests passed" + + # -- Collect diagnostics on failure -------------------------------------- + - name: Collect diagnostics + if: failure() + env: + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + run: | + set -euo pipefail + ARTIFACTS="${GITHUB_WORKSPACE}/_artifacts" + mkdir -p "${ARTIFACTS}" + + echo "--- VM instance view ---" + az vm get-instance-view \ + --resource-group "${TEST_RESOURCE_GROUP}" \ + --name "${VM_NAME}" \ + --output json 2>&1 | tee "${ARTIFACTS}/vm-instance-view.json" || true + + if [[ "${OS_TYPE}" == "Windows" ]]; then + az vm run-command invoke \ + --resource-group "${TEST_RESOURCE_GROUP}" \ + --name "${VM_NAME}" \ + --command-id RunPowerShellScript \ + --scripts 'Get-EventLog -LogName Application -Newest 50 | Format-Table -AutoSize' \ + --output json 2>&1 | tee "${ARTIFACTS}/windows-eventlog.json" || true + else + SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=10 -i ${TMPDIR}/sshkey" + SSH_CMD="ssh ${SSH_OPTS} capi@${PUBLIC_IP}" + + echo "--- journalctl kubelet ---" + ${SSH_CMD} 'sudo journalctl -u kubelet --no-pager -n 
100' 2>&1 | tee "${ARTIFACTS}/kubelet.log" || true + echo "--- journalctl containerd ---" + ${SSH_CMD} 'sudo journalctl -u containerd --no-pager -n 100' 2>&1 | tee "${ARTIFACTS}/containerd.log" || true + echo "--- cloud-init status ---" + ${SSH_CMD} 'cloud-init status --long' 2>&1 | tee "${ARTIFACTS}/cloud-init.log" || true + echo "--- dmesg tail ---" + ${SSH_CMD} 'dmesg | tail -50' 2>&1 | tee "${ARTIFACTS}/dmesg.log" || true + fi + + - name: Upload diagnostics + if: failure() + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: test-diagnostics + path: _artifacts/ + retention-days: 7 + + # -- Cleanup (always runs) ----------------------------------------------- + - name: Clean up test resource group + if: always() + run: | + set -euo pipefail + + TEST_RESOURCE_GROUP="${TEST_RESOURCE_GROUP:-}" + if [[ -n "${TEST_RESOURCE_GROUP}" ]]; then + echo "Deleting test resource group: ${TEST_RESOURCE_GROUP}" + az group delete -n "${TEST_RESOURCE_GROUP}" --yes --no-wait \ + --force-deletion-types=Microsoft.Compute/virtualMachines \ + || true + else + echo "No test resource group to clean up" + fi + + # --------------------------------------------------------------------------- + # Promote + # --------------------------------------------------------------------------- + approve_promotion: + name: "Approve Promotion: ${{ inputs.os }} ${{ inputs.os_version }} (k8s ${{ inputs.kubernetes_version }})" + needs: [build, test] + if: ${{ always() && !inputs.skip_promote && needs.build.result == 'success' && (needs.test.result == 'success' || needs.test.result == 'skipped') }} + runs-on: ubuntu-latest + environment: image-promotion-approval + steps: + - name: Promotion Approved + run: echo "Image promotion approved" + + promote: + name: Promote to Community Gallery + needs: [build, approve_promotion] + if: ${{ always() && needs.build.result == 'success' && needs.approve_promotion.result == 'success' }} + runs-on: ubuntu-latest + 
timeout-minutes: 120 + env: + RESOURCE_GROUP: ${{ inputs.resource_group }} + GALLERY_NAME: ${{ inputs.gallery_name }} + REPLICATED_REGIONS_INPUT: ${{ inputs.replicated_regions }} + + steps: + - name: Checkout repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Download publishing info artifact + uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 + with: + name: publishing-info + path: images/capi/packer/azure/sig/ + + - name: Import variables from build + id: vars + run: | + set -euo pipefail + + PUBLISHING_INFO=$(jq -c . images/capi/packer/azure/sig/sig-publishing-info.json) + echo "PUBLISHING_INFO=${PUBLISHING_INFO}" + + echo "DISTRO=$(echo "$PUBLISHING_INFO" | jq -r .distro)" >> $GITHUB_OUTPUT + echo "HYPERV_GEN=$(echo "$PUBLISHING_INFO" | jq -r .hyperv_gen)" >> $GITHUB_OUTPUT + echo "OS_TYPE=$(echo "$PUBLISHING_INFO" | jq -r .os_type)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_resource_group_name)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_name)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_id)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_LOCATION=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_location)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_shared_image_gallery_id)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_resource_group)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_name)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_name)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(echo "$PUBLISHING_INFO" | jq -r 
.shared_image_gallery_image_version)" >> $GITHUB_OUTPUT + echo "TAGS=$(echo "$PUBLISHING_INFO" | jq -r .tags)" >> $GITHUB_OUTPUT + + - name: Azure Login + uses: azure/login@532459ea530d8321f2fb9bb10d1e0bcf23869a43 # v3.0.0 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Publish to community gallery + working-directory: images/capi + env: + DISTRO: ${{ steps.vars.outputs.DISTRO }} + HYPERV_GEN: ${{ steps.vars.outputs.HYPERV_GEN }} + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + MANAGED_IMAGE_ID: ${{ steps.vars.outputs.MANAGED_IMAGE_ID }} + MANAGED_IMAGE_LOCATION: ${{ steps.vars.outputs.MANAGED_IMAGE_LOCATION }} + SHARED_IMAGE_GALLERY_IMAGE_NAME: ${{ steps.vars.outputs.SHARED_IMAGE_GALLERY_IMAGE_NAME }} + SHARED_IMAGE_GALLERY_IMAGE_VERSION: ${{ steps.vars.outputs.SHARED_IMAGE_GALLERY_IMAGE_VERSION }} + TAGS: ${{ steps.vars.outputs.TAGS }} + EULA_LINK: ${{ vars.EULA_LINK }} + PUBLISHER_EMAIL: ${{ vars.PUBLISHER_EMAIL }} + PUBLISHER_URI: ${{ vars.PUBLISHER_URI }} + SIG_PUBLISHER: ${{ vars.SIG_PUBLISHER }} + run: | + set -euo pipefail + + EOL_DATE=$(date --date='+6 months' +"%Y-%m-%dT00:00:00+00:00") + GALLERY_DESCRIPTION="Shared image gallery for Cluster API Provider Azure" + GALLERY_NAME="${GALLERY_NAME}" + PUBLIC_NAME_PREFIX="ClusterAPI" + RESOURCE_GROUP="${RESOURCE_GROUP}" + SIG_OFFER="reference-images" + + # Set replicated regions (deduplicate in case build region is already in the list) + REPLICATED_REGIONS=$(echo "${MANAGED_IMAGE_LOCATION} ${REPLICATED_REGIONS_INPUT}" | tr ' ' '\n' | sort -u | tr '\n' ' ') + + # Create the resource group if needed + if ! az group show -n "${RESOURCE_GROUP}" -o none 2>/dev/null; then + az group create -n "${RESOURCE_GROUP}" -l "${MANAGED_IMAGE_LOCATION}" --tags ${TAGS:-} + fi + + # Create the public community shared image gallery if it doesn't exist + if ! 
az sig show --gallery-name "${GALLERY_NAME}" --resource-group "${RESOURCE_GROUP}" -o none 2>/dev/null; then + sig_create_args=( + --gallery-name "${GALLERY_NAME}" + --resource-group "${RESOURCE_GROUP}" + --description "${GALLERY_DESCRIPTION}" + --eula "${EULA_LINK}" + --location "${MANAGED_IMAGE_LOCATION}" + --public-name-prefix "${PUBLIC_NAME_PREFIX}" + --publisher-email "${PUBLISHER_EMAIL}" + --publisher-uri "${PUBLISHER_URI}" + --permissions Community + ) + if [[ -n "${TAGS:-}" ]]; then + sig_create_args+=(--tags ${TAGS}) + fi + az sig create "${sig_create_args[@]}" + fi + + # Translate prohibited words to alternatives in the image definition name + GALLERY_IMAGE_DEFINITION=${SHARED_IMAGE_GALLERY_IMAGE_NAME//ubuntu/ubun2} + GALLERY_IMAGE_DEFINITION=${GALLERY_IMAGE_DEFINITION//windows/win} + + # Create image definition if it doesn't exist + if ! az sig image-definition show --gallery-name "${GALLERY_NAME}" --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" --resource-group "${RESOURCE_GROUP}" -o none 2>/dev/null; then + az sig image-definition create \ + --resource-group "${RESOURCE_GROUP}" \ + --gallery-name "${GALLERY_NAME}" \ + --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" \ + --publisher "${SIG_PUBLISHER}" \ + --offer "${SIG_OFFER}" \ + --sku "${DISTRO}" \ + --hyper-v-generation "${HYPERV_GEN}" \ + --os-type "${OS_TYPE}" \ + | tee -a sig-publishing.json + fi + + # Delete the image version if it exists (always create a new image, overwriting if necessary) + if az sig image-version show --gallery-name "${GALLERY_NAME}" --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" --gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" --resource-group "${RESOURCE_GROUP}" -o none 2>/dev/null; then + az sig image-version delete \ + --resource-group "${RESOURCE_GROUP}" \ + --gallery-name "${GALLERY_NAME}" \ + --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" \ + --gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" + fi + + # 
Copy the tags from the managed image to the image version + IMAGE_TAGS=$(az tag list --resource-id "${MANAGED_IMAGE_ID}" | jq -r '.properties.tags | to_entries | map("\(.key)=\(.value)") | join(" ")') + + # Create the image version + az sig image-version create \ + --resource-group "${RESOURCE_GROUP}" \ + --gallery-name "${GALLERY_NAME}" \ + --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" \ + --gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" \ + --target-regions ${REPLICATED_REGIONS} \ + --managed-image "${MANAGED_IMAGE_ID}" \ + --end-of-life-date "${EOL_DATE}" \ + --tags ${IMAGE_TAGS:-} \ + | tee sig-publishing.json + + - name: Upload publishing artifact + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: sig-publishing + path: images/capi/sig-publishing.json + retention-days: 30 + + # --------------------------------------------------------------------------- + # Clean + # --------------------------------------------------------------------------- + clean: + name: Clean Staging Resources + needs: [build, test, approve_promotion, promote] + if: ${{ always() && needs.build.result == 'success' }} + runs-on: ubuntu-latest + timeout-minutes: 30 + env: + RESOURCE_GROUP: ${{ inputs.resource_group }} + STAGING_GALLERY_NAME: ${{ inputs.staging_gallery_name }} + + steps: + - name: Checkout repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Download publishing info artifact + uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 + with: + name: publishing-info + path: images/capi/packer/azure/sig/ + + - name: Import variables from build + id: vars + run: | + set -euo pipefail + + PUBLISHING_INFO=$(jq -c . 
images/capi/packer/azure/sig/sig-publishing-info.json) + echo "PUBLISHING_INFO=${PUBLISHING_INFO}" + + echo "MANAGED_IMAGE_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_id)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_name)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_version)" >> $GITHUB_OUTPUT + + - name: Azure Login + uses: azure/login@532459ea530d8321f2fb9bb10d1e0bcf23869a43 # v3.0.0 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Clean up staging resources + working-directory: images/capi + env: + MANAGED_IMAGE_ID: ${{ steps.vars.outputs.MANAGED_IMAGE_ID }} + SHARED_IMAGE_GALLERY_IMAGE_NAME: ${{ steps.vars.outputs.SHARED_IMAGE_GALLERY_IMAGE_NAME }} + SHARED_IMAGE_GALLERY_IMAGE_VERSION: ${{ steps.vars.outputs.SHARED_IMAGE_GALLERY_IMAGE_VERSION }} + run: | + set -euo pipefail + + GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" + RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" + + # Delete the source managed image if it exists + if az image show --ids "${MANAGED_IMAGE_ID}" -o none 2>/dev/null; then + echo "Deleting managed image: ${MANAGED_IMAGE_ID}" + az image delete --ids "${MANAGED_IMAGE_ID}" + else + echo "Managed image not found, skipping deletion" + fi + + # Delete the staging image version if it exists + if az sig image-version show --resource-group "${RESOURCE_GROUP}" --gallery-name "${GALLERY_NAME}" --gallery-image-definition "${SHARED_IMAGE_GALLERY_IMAGE_NAME}" --gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" -o none 2>/dev/null; then + echo "Deleting staging image version: ${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" + az sig image-version delete \ + --resource-group "${RESOURCE_GROUP}" \ + --gallery-name "${GALLERY_NAME}" \ + --gallery-image-definition 
"${SHARED_IMAGE_GALLERY_IMAGE_NAME}" \ + --gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" + else + echo "Staging image version not found, skipping deletion" + fi diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index b2bee5f1f0..c3c33cecda 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -18,8 +18,10 @@ aliases: image-builder-openstack-maintainers: - yankcrime image-builder-cloudstack-reviewers: - - rohityadavcloud - davidjumani + - vishesh92 + - weizhouapache + - yadvr image-builder-scaleway-reviewers: - Tomy2e - Mia-Cross @@ -66,9 +68,11 @@ aliases: - seanschneeweiss - tobiasgiese cluster-api-cloudstack-maintainers: - - rohityadavcloud - davidjumani - Pearl1594 + - vishesh92 + - weizhouapache + - yadvr cluster-api-vsphere-maintainers: - chrischdi - gab-satchi diff --git a/README.md b/README.md index 5493ff670a..5c257f1a2d 100644 --- a/README.md +++ b/README.md @@ -22,19 +22,16 @@ The table below shows the currently provided operating systems for each provider | OS | ami | azure | digitalocean | gce | hcloud | huaweicloud | maas | nutanix | oci | openstack | outscale | ova | powervs | proxmox | qemu | raw | scaleway | vultr | |-------------------|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----| +| AlmaLinux 9 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Amazon Linux 2 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Amazon Linux 2023 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Azure Linux 3 | ❌ | 💙 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | -| CentOS 8 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | | CentOS 9 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | | Flatcar | ✅ | 💙 | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | 💙 | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | -| Oracle Linux 8 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Oracle Linux 9 
| ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Photon 4 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Photon 5 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | 💙 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | -| RHEL 8 | ✅ | ✅ | ❌ | 💙 | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | | RHEL 9 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | -| Rocky Linux 8 | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | | Rocky Linux 9 | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | 💙 | ❌ | ✅ | ✅ | ❌ | ✅ | ❌ | | Ubuntu 22.04 | ✅ | 💙 | ✅ | 💙 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 💙 | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | | Ubuntu 24.04 | ✅ | 💙 | ✅ | 💙 | ✅ | ❌ | ✅ | ✅ | ❌ | ✅ | ✅ | 💙 | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | diff --git a/RELEASE.md b/RELEASE.md index 3bed596d74..a120cce83a 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,11 +1,11 @@ # Image Builder Releases -The current release of Image Builder is [v0.1.46][] (August 25, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46`. +The current release of Image Builder is [v0.1.50][] (April 1, 2026). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50`. ## Release Process For more detail about image-builder project releases, see the [Image Builder Book][]. 
-[v0.1.46]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.46 +[v0.1.50]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.50 [Image Builder Book]: https://image-builder.sigs.k8s.io/capi/releasing.html diff --git a/cloudbuild.yaml b/cloudbuild.yaml index e872db977a..9ad4a1e13a 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -3,7 +3,7 @@ timeout: 3000s options: substitution_option: ALLOW_LOOSE steps: - - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud@sha256:63840f133e0dfeea0af9ef391210da7fab9d2676172e2967fccab0cd6110c4e7' # v20250513-9264efb079 + - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud@sha256:ff388e0dc16351e96f8464e2e185b74a7578a5ccb7a112cf3393468e59e6e2d2' # v20260205-38cfa9523f entrypoint: make env: - DOCKER_CLI_EXPERIMENTAL=enabled diff --git a/docs/book/src/capi/capi.md b/docs/book/src/capi/capi.md index 0fdacc148f..1a4f2f1569 100644 --- a/docs/book/src/capi/capi.md +++ b/docs/book/src/capi/capi.md @@ -61,6 +61,7 @@ Several variables can be used to customize the image build. | `firstboot_custom_roles_pre`
`firstboot_custom_roles_post`
`node_custom_roles_pre`
`node_custom_roles_post` | Each of these four variables allows for giving a space delimited string of custom Ansible roles to run at different times. The "pre" roles run as the very first thing in the playbook (useful for setting up environment specifics like networking changes), and the "post" roles as the very last (useful for undoing those changes, custom additions, etc). Note that the "post" role does run before the "sysprep" role in the "node" playbook, as the "sysprep" role seals the image. If the role is placed in the `ansible/roles` directory, it can be referenced by name. Otherwise, it must be a fully qualified path to the role. | `""` | | `disable_public_repos` | If set to `"true"`, this will disable all existing package repositories defined in the OS before doing any package installs. The `extra_repos` variable *must* be set for package installs to succeed. | `"false"` | | `extra_debs` | This can be set to a space delimited string containing the names of additional deb packages to install | `""` | +| `extra_kernel_boot_params` | This can be set to a space delimited string containing the boot kernel parameters (e.g.: cpufreq.default_governor=performance) | `""` | | `extra_repos` | A space delimited string containing the names of files to add to the image containing repository definitions. The files should be given as absolute paths. | `""` | | `extra_rpms` | This can be set to a space delimited string containing the names of additional RPM packages to install | `""` | | `http_proxy` | This can be set to URL to use as an HTTP proxy during the Ansible stage of building | `""` | @@ -73,7 +74,7 @@ Several variables can be used to customize the image build. 
| `no_proxy` | This can be set to a comma-delimited list of domains that should be excluded from proxying during the Ansible stage of building | `""` | | `reenable_public_repos` | If set to `"false"`, the package repositories disabled by setting `disable_public_repos` will remain disabled at the end of the build. | `"true"` | | `remove_extra_repos` | If set to `"true"`, the package repositories added to the OS through the use of `extra_repos` will be removed at the end of the build. | `"false"` | -| `pause_image` | This can be used to override the default pause image used to hold the network namespace and IP for the pod. | `"registry.k8s.io/pause:3.10"` | +| `pause_image` | This can be used to override the default pause image used to hold the network namespace and IP for the pod. | `"registry.k8s.io/pause:3.10.2"` | | `pip_conf_file` | The path to a file to be copied into the image at `/etc/pip.conf` for use as a global config file. This file will be removed at the end of the build if `remove_extra_repos` is `true`. | `""` | | `containerd_additional_settings` | This is a string, base64 encoded, that contains additional configuration for containerd. Version 2 and 3 are supported, please use the appropriate version based on your containerd version. It must not contain the pause image configuration block. See `image-builder/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml` for the template. | `null` | | `load_additional_components` | If set to `"true"`, the `load_additional_components` role will be executed. This needs to be set to `"true"` if any of `additional_url_images`, `additional_registry_images` or `additional_executables` are set to `"true"` | `"false"` | @@ -231,3 +232,15 @@ Put the Ansible role files in the `ansible/roles` directory. ``` Note, for backwards compatibility reasons, the variable `custom_role_names` is still accepted as an alternative to `node_custom_roles_post`, and they are functionally equivalent. 
+ +##### Reenabling Flatcar USB devices + +Flatcar usb devices are disabled by default for security reasons. +See [flatcar documentation](https://www.flatcar.org/docs/latest/setup/security/hardening-guide/#disable-usb) for more information. +To reenable them, set the following variable: + +```json +{ + "ansible_user_vars": "disable_flatcar_usb=false" +} +``` diff --git a/docs/book/src/capi/container-image.md b/docs/book/src/capi/container-image.md index 75563e1c2b..c843ba07e6 100644 --- a/docs/book/src/capi/container-image.md +++ b/docs/book/src/capi/container-image.md @@ -18,7 +18,7 @@ Run the docker build target of Makefile The latest image-builder container image release is available here: ```commandline -docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 +docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 ``` ### Examples @@ -27,18 +27,17 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - If the AWS CLI is already installed on your machine, you can simply mount the `~/.aws` folder that stores all the required credentials. ```commandline - docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 build-ami-ubuntu-2404 + docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 build-ami-ubuntu-2404 ``` - Another alternative is to use an `aws-creds.env` file to load the credentials and pass it during docker run. 
```commandline AWS_ACCESS_KEY_ID=xxxxxxx AWS_SECRET_ACCESS_KEY=xxxxxxxx - AWS_DEFAULT_REGION=xxxxxx ``` ```commandline - docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 build-ami-ubuntu-2404 + docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 build-ami-ubuntu-2404 ``` - AZURE @@ -52,7 +51,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ``` ```commandline - docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 build-azure-sig-ubuntu-2404 + docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 build-azure-sig-ubuntu-2404 ``` - Proxmox @@ -84,7 +83,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - Docker's `--net=host` option to ensure http server starts with the host IP and not the Docker container IP. This option is Linux specific and thus implies that it can be run only from a Linux machine. ```commandline - docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 build-node-ova-vsphere-ubuntu-2404 + docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 build-node-ova-vsphere-ubuntu-2404 ``` In addition to this, further customizations can be done as discussed [here](./capi.md#customization). 
diff --git a/docs/book/src/capi/containerd/customizing-containerd.md b/docs/book/src/capi/containerd/customizing-containerd.md index 713244789a..e6baaf9c35 100644 --- a/docs/book/src/capi/containerd/customizing-containerd.md +++ b/docs/book/src/capi/containerd/customizing-containerd.md @@ -73,3 +73,14 @@ root@sandboxed-container:/# dmesg ``` You are running a sandboxed container. + +## Additional Customizations + +Containerd can be further customized in a couple of ways. One option that is directly inserted into the containerd +[`config.toml`](https://github.com/kubernetes-sigs/image-builder/blob/main/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml#L14) +is to override the image pull progress timeout. This can be done using `containerd_image_pull_progress_timeout`. + +You can also add further configuration by adding values for `containerd_additional_settings`. This is rendered at the +end of the +[`config.toml`](https://github.com/kubernetes-sigs/image-builder/blob/main/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml#L86) +default template. diff --git a/docs/book/src/capi/providers/aws.md b/docs/book/src/capi/providers/aws.md index 17d6889223..f6f11c527e 100644 --- a/docs/book/src/capi/providers/aws.md +++ b/docs/book/src/capi/providers/aws.md @@ -43,7 +43,6 @@ the different operating systems. 
| `amazon-2.json` | The settings for the Amazon 2 Linux image | | `flatcar.json` | The settings for the Flatcar image | | `flatcar-arm64.json` | The settings for the Flatcar arm64 image | -| `rhel-8.json` | The settings for the RHEL 8 image | | `rockylinux.json` | The settings for the Rocky Linux image | | `ubuntu-2204.json` | The settings for the Ubuntu 22.04 image | | `ubuntu-2404.json` | The settings for the Ubuntu 24.04 image | diff --git a/docs/book/src/capi/providers/azure.md b/docs/book/src/capi/providers/azure.md index 9c3a736522..7a00ea4a9c 100644 --- a/docs/book/src/capi/providers/azure.md +++ b/docs/book/src/capi/providers/azure.md @@ -50,8 +50,8 @@ Confidential VMs require specific generation 2 OS images. The naming pattern of # Ubuntu 24.04 LTS for Confidential VMs make build-azure-sig-ubuntu-2404-cvm -# Windows 2019 with containerd for Confindential VMs -make build-azure-sig-windows-2019-containerd-cvm +# Windows 2022 with containerd for Confindential VMs +make build-azure-sig-windows-2022-containerd-cvm ``` ### Configuration diff --git a/docs/book/src/capi/providers/gcp.md b/docs/book/src/capi/providers/gcp.md index f84852d0a0..0fec5d46c2 100644 --- a/docs/book/src/capi/providers/gcp.md +++ b/docs/book/src/capi/providers/gcp.md @@ -50,7 +50,6 @@ The `gce` sub-directory inside `images/capi/packer` stores JSON configuration fi | -------- | -------- | `ubuntu-2204.json` | Settings for Ubuntu 22.04 image | | `ubuntu-2404.json` | Settings for Ubuntu 24.04 image | -| `rhel-8.json` | Settings for RHEL 8 image | #### Common GCP options diff --git a/docs/book/src/capi/providers/hcloud.md b/docs/book/src/capi/providers/hcloud.md index 97dbae2c9e..e35759075b 100644 --- a/docs/book/src/capi/providers/hcloud.md +++ b/docs/book/src/capi/providers/hcloud.md @@ -26,7 +26,6 @@ the different operating systems. 
|----------------------|------------------------------------------| | `flatcar.json` | The settings for the Flatcar image | | `flatcar-arm64.json` | The settings for the Flatcar arm64 image | -| `rockylinux-8.json` | The settings for the RockyLinux 8 image | | `rockylinux-9.json` | The settings for the RockyLinux 9 image | | `ubuntu-2204.json` | The settings for the Ubuntu 22.04 image | | `ubuntu-2404.json` | The settings for the Ubuntu 24.04 image | diff --git a/docs/book/src/capi/providers/ibmcloud.md b/docs/book/src/capi/providers/ibmcloud.md index d700c6877b..6ef9225bf0 100644 --- a/docs/book/src/capi/providers/ibmcloud.md +++ b/docs/book/src/capi/providers/ibmcloud.md @@ -18,14 +18,14 @@ $ cd image-builder/images/capi/ $ make deps-powervs ``` -From the `images/capi` directory, run `make build-powervs-centos-8`. The image is built and uploaded to your bucket capibm-powervs-{BUILD_NAME}-{KUBERNETES_VERSION}-{BUILD_TIMESTAMP}. +From the `images/capi` directory, run `make build-powervs-centos-9`. The image is built and uploaded to your bucket capibm-powervs-{BUILD_NAME}-{KUBERNETES_VERSION}-{BUILD_TIMESTAMP}. > **Note:** Fill the required fields which are listed [here](#common-powervs-options) in a json file and pass it to the `PACKER_VAR_FILES` environment variable while building the image. 
-For building a centos-streams8 based CAPI image, run the following commands - +For building a centos-streams9 based CAPI image, run the following commands - ```bash -$ ANSIBLE_SSH_ARGS="-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedAlgorithms=+ssh-rsa" PACKER_VAR_FILES=variables.json make build-powervs-centos-8 +$ ANSIBLE_SSH_ARGS="-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedAlgorithms=+ssh-rsa" PACKER_VAR_FILES=variables.json make build-powervs-centos-9 ``` ### Configuration @@ -34,8 +34,7 @@ In addition to the configuration found in `images/capi/packer/config`, the `powe | File | Description | |------|-------------| -| `centos-8.json` | The settings for the CentOS 8 image | -| `centos-9.json` | The settings for the CentOS 8 image | +| `centos-9.json` | The settings for the CentOS 9 image | #### Common PowerVS options @@ -60,7 +59,7 @@ The parameters can be set via a variable file and passed via `PACKER_VAR_FILES`. > **Note:** -> 1. When setting `dhcp_network: true`, you need to build an OS image with certain network settings using [pvsadm tool](https://github.com/ppc64le-cloud/pvsadm/blob/main/docs/Build%20DHCP%20enabled%20Centos%20Images.md) and replace [the fields](https://github.com/kubernetes-sigs/image-builder/blob/cb925047f388090a0db3430ca3172da63eff952c/images/capi/packer/powervs/centos-8.json#L6) with the custom image details. +> 1. When setting `dhcp_network: true`, you need to build an OS image with certain network settings using [pvsadm tool](https://github.com/ppc64le-cloud/pvsadm/blob/main/docs/Build%20DHCP%20enabled%20Centos%20Images.md) and replace [the fields](https://github.com/kubernetes-sigs/image-builder/blob/main/images/capi/packer/powervs/centos-9.json#L6) with the custom image details. > 2. Clone the image-builder repo and run `make build` commands from a system where the DHCP private IP can be reached and SSH able. 
## CAPIBM - VPC diff --git a/docs/book/src/capi/providers/maas.md b/docs/book/src/capi/providers/maas.md index 178dbf5751..2db31b0b9a 100644 --- a/docs/book/src/capi/providers/maas.md +++ b/docs/book/src/capi/providers/maas.md @@ -74,3 +74,44 @@ maas admin boot-resources create name=custom/your-image architecture=amd64/gener ``` **Note:** Set `base_image=ubuntu/jammy` for Ubuntu 22.04 or `ubuntu/noble` for 24.04. + +## Custom Curtin Scripts +If you need to override the default MaaS curtin scripts, create a custom role containing the curtin hooks. The files must be copied to the `/curtin` directory + +For more information on how to create and use custom roles, refer to the official documentation: https://image-builder.sigs.k8s.io/capi/capi#customization + +## iSCSI configuration note: + +If you need unique names for the iSCSI InitiatorName, add a KubeadmConfigTemplate and include the following command under `spec.template.spec.preKubeadmCommands` + +```bash +echo "InitiatorName=$(iscsi-iname -p iqn.2004-10.com.ubuntu:$(cat /etc/hostname))" > /etc/iscsi/initiatorname.iscsi +``` + +### Example + +```yaml +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: t-cluster-md-0 + namespace: default +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + read-only-port: "0" + name: '{{ v1.local_hostname }}' + preKubeadmCommands: + - echo "InitiatorName=$(iscsi-iname -p iqn.2004-10.com.ubuntu:$(cat /etc/hostname))" > /etc/iscsi/initiatorname.iscsi + - systemctl restart open-iscsi + - while [ ! 
-S /var/run/containerd/containerd.sock ]; do echo 'Waiting for containerd...'; + sleep 1; done + - sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab + - swapoff -a + useExperimentalRetryJoin: true +``` diff --git a/docs/book/src/capi/providers/nutanix.md b/docs/book/src/capi/providers/nutanix.md index 888f8b6e74..79d11776ce 100644 --- a/docs/book/src/capi/providers/nutanix.md +++ b/docs/book/src/capi/providers/nutanix.md @@ -89,9 +89,7 @@ The `nutanix` sub-directory inside `images/capi/packer` stores JSON configuratio | File | Description | |---------------------|-----------------------------------------------| | `ubuntu-2204.json` | Settings for Ubuntu 22.04 image | -| `rockylinux-8.json` | Settings for Rocky Linux 8 image (UEFI) | | `rockylinux-9.json` | Settings for Rocky Linux 9 image | -| `rhel-8.json` | Settings for RedHat Enterprise Linux 8 image | | `rhel-9.json` | Settings for RedHat Enterprise Linux 9 image | | `flatcar.json` | Settings for Flatcar Linux image (beta) | | `windows-2022.json` | Settings for Windows Server 2022 image (beta) | diff --git a/docs/book/src/capi/providers/oci.md b/docs/book/src/capi/providers/oci.md index 722adcff42..290c12d47a 100644 --- a/docs/book/src/capi/providers/oci.md +++ b/docs/book/src/capi/providers/oci.md @@ -26,7 +26,6 @@ the different operating systems. 
| File | Description | |------|-------------| -| `oracle-linux-8.json` | The settings for the Oracle Linux 8 image | | `oracle-linux-9.json` | The settings for the Oracle Linux 9 image | | `ubuntu-2204.json` | The settings for the Ubuntu 22.04 image | | `windows-2019.json` | The settings for the Windows Server 2019 image | @@ -62,7 +61,7 @@ Create a file with the following contents and name it as `oci.json` #### Example make command with Packer VAR file ```bash -PACKER_VAR_FILES=oci.json make build-oci-oracle-linux-8 +PACKER_VAR_FILES=oci.json make build-oci-oracle-linux-9 ``` #### Build an Arm based image diff --git a/docs/book/src/capi/providers/vsphere.md b/docs/book/src/capi/providers/vsphere.md index b17e7e290d..f774f4cff5 100644 --- a/docs/book/src/capi/providers/vsphere.md +++ b/docs/book/src/capi/providers/vsphere.md @@ -79,7 +79,6 @@ In addition to the configuration found in `images/capi/packer/config`, the `ova` |--------------------|--------------------------------------------------------------| | `flatcar.json` | The settings for the Flatcar image | | `photon-4.json` | The settings for the Photon 4 image | -| `rhel-8.json` | The settings for the RHEL 8 image | | `rhel-9.json` | The settings for the RHEL 9 image | | `ubuntu-2204.json` | The settings for the Ubuntu 22.04 image | | `ubuntu-2204-efi.json` | The settings for the Ubuntu 22.04 EFI image | diff --git a/docs/book/src/capi/quickstart.md b/docs/book/src/capi/quickstart.md index 78b0d07ef9..4fa176830b 100644 --- a/docs/book/src/capi/quickstart.md +++ b/docs/book/src/capi/quickstart.md @@ -4,7 +4,7 @@ In this tutorial we will cover the basics of how to download and execute the Ima ## Installation -As a set of scripts and Makefiles that rely on Packer and Ansible, there is image builder binary/application to install. Rather we need to download the tooling from the GitHub repo and make sure that the Packer and Ansible are installed. 
+As a set of scripts and Makefiles that rely on Packer and Ansible, there is no image builder binary/application to install. Rather we need to download the tooling from the GitHub repo and make sure that the Packer and Ansible are installed. To get the latest image-builder source on your machine, choose one of the following methods: diff --git a/docs/book/src/capi/releasing.md b/docs/book/src/capi/releasing.md index 7a010e94de..2d5d4b4e2b 100644 --- a/docs/book/src/capi/releasing.md +++ b/docs/book/src/capi/releasing.md @@ -1,10 +1,11 @@ # Image Builder Releases -The current release of Image Builder is [v0.1.47][] (August 25, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47`. +The current release of Image Builder is [v0.1.50][] (April 1, 2026). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50`. ## Release Process Releasing image-builder is a simple process: project maintainers should be able to follow the steps below in order to create a new release. +Before proceeding, make sure the current SHA being used in [`cloudbuild.yaml`](../../../../cloudbuild.yaml) is [still valid](https://console.cloud.google.com/artifacts/docker/k8s-staging-test-infra/us/gcr.io/gcb-docker-gcloud). If it is not, then the job that runs on tag will fail. ### Create a tag @@ -22,7 +23,7 @@ Releases in image-builder follow [semantic versioning][semver] conventions. Curr - *If signing tags with GPG, makes your key available to the `git tag` command.* - Create a new tag: - `export IB_VERSION=v0.1.x` - - *Replace `x` with the next patch version. For example: `v0.1.47`.* + - *Replace `x` with the next patch version. 
For example: `v0.1.51`.* - `git tag -s -m "Image Builder ${IB_VERSION}" ${IB_VERSION}` - `git push upstream ${IB_VERSION}` @@ -77,14 +78,14 @@ Wait for this PR to merge before communicating the release to users, so image-bu In the [#image-builder channel][] on the Kubernetes Slack, post a message announcing the new release. Include a link to the GitHub release and a thanks to the contributors: ``` -Image-builder v0.1.47 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.47 +Image-builder v0.1.51 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.51 Thanks to all contributors! ``` -[v0.1.47]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.47 +[v0.1.50]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.50 [#image-builder channel]: https://kubernetes.slack.com/archives/C01E0Q35A8J [Personal access tokens]: https://github.com/settings/tokens [post-image-builder-push-images]: https://prow.k8s.io/?repo=kubernetes-sigs%2Fimage-builder&type=postsubmit&job=post-image-builder-push-images [releases page]: https://github.com/kubernetes-sigs/image-builder/releases [semver]: https://semver.org/#semantic-versioning-200 -[staging repository]: https://console.cloud.google.com/gcr/images/k8s-staging-scl-image-builder/GLOBAL/cluster-node-image-builder-amd64 +[staging repository]: https://console.cloud.google.com/artifacts/docker/k8s-staging-scl-image-builder/us/gcr.io/cluster-node-image-builder-amd64 diff --git a/images/capi/.ansible-lint-ignore b/images/capi/.ansible-lint-ignore index f2dc791c89..aeff3f0a5b 100644 --- a/images/capi/.ansible-lint-ignore +++ b/images/capi/.ansible-lint-ignore @@ -1,111 +1,55 @@ # This file contains ignores rule violations for ansible-lint -ansible/firstboot.yml name[missing] -ansible/firstboot.yml name[play] -ansible/node.yml name[missing] -ansible/node.yml name[play] -ansible/python.yml name[missing] -ansible/python.yml name[play] 
-ansible/roles/containerd/tasks/main.yml name[missing] ansible/roles/containerd/tasks/photon.yml no-changed-when +ansible/roles/containerd/defaults/main.yml var-naming[no-role-prefix] ansible/roles/ecr_credential_provider/tasks/main.yaml no-changed-when ansible/roles/ecr_credential_provider/tasks/main.yaml yaml[line-length] -ansible/roles/firstboot/tasks/main.yaml name[missing] -ansible/roles/firstboot/tasks/qemu.yml name[missing] ansible/roles/gpu/tasks/amd.yml no-changed-when ansible/roles/gpu/tasks/nvidia.yml no-changed-when ansible/roles/kubernetes/defaults/main.yml var-naming[no-role-prefix] ansible/roles/kubernetes/defaults/main.yml yaml[line-length] -ansible/roles/kubernetes/tasks/crictl-url.yml name[template] -ansible/roles/kubernetes/tasks/debian.yml jinja[spacing] -ansible/roles/kubernetes/tasks/ecrpull.yml command-instead-of-shell ansible/roles/kubernetes/tasks/ecrpull.yml no-changed-when -ansible/roles/kubernetes/tasks/kubeadmpull.yml command-instead-of-shell ansible/roles/kubernetes/tasks/kubeadmpull.yml no-changed-when -ansible/roles/kubernetes/tasks/main.yml name[missing] -ansible/roles/kubernetes/tasks/photon.yml jinja[spacing] ansible/roles/kubernetes/tasks/photon.yml no-changed-when -ansible/roles/kubernetes/tasks/redhat.yml jinja[spacing] -ansible/roles/kubernetes/tasks/url.yml command-instead-of-shell ansible/roles/kubernetes/tasks/url.yml no-changed-when ansible/roles/load_additional_components/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/load_additional_components/tasks/main.yml name[missing] -ansible/roles/load_additional_components/tasks/registry.yml command-instead-of-shell ansible/roles/load_additional_components/tasks/registry.yml no-changed-when -ansible/roles/load_additional_components/tasks/url.yml command-instead-of-shell ansible/roles/load_additional_components/tasks/url.yml no-changed-when ansible/roles/load_additional_components/tasks/url.yml yaml[line-length] ansible/roles/node/defaults/main.yml 
var-naming[no-role-prefix] ansible/roles/node/tasks/main.yml command-instead-of-module -ansible/roles/node/tasks/main.yml name[missing] ansible/roles/node/tasks/main.yml no-changed-when ansible/roles/providers/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/providers/tasks/aws.yml command-instead-of-shell -ansible/roles/providers/tasks/aws.yml name[missing] ansible/roles/providers/tasks/aws.yml no-changed-when ansible/roles/providers/tasks/awscliv2.yml no-changed-when -ansible/roles/providers/tasks/awscliv2.yml package-latest -ansible/roles/providers/tasks/azure.yml name[missing] -ansible/roles/providers/tasks/cloudstack.yml command-instead-of-shell ansible/roles/providers/tasks/cloudstack.yml no-changed-when -ansible/roles/providers/tasks/googlecompute.yml command-instead-of-shell ansible/roles/providers/tasks/googlecompute.yml no-changed-when -ansible/roles/providers/tasks/main.yml name[missing] -ansible/roles/providers/tasks/nutanix.yml name[missing] -ansible/roles/providers/tasks/raw.yml command-instead-of-shell ansible/roles/providers/tasks/raw.yml no-changed-when ansible/roles/providers/tasks/vmware-photon.yml no-changed-when -ansible/roles/providers/tasks/vmware-redhat.yml command-instead-of-shell ansible/roles/providers/tasks/vmware-redhat.yml no-changed-when -ansible/roles/providers/tasks/vmware.yml name[missing] ansible/roles/python/defaults/main.yml var-naming[no-role-prefix] ansible/roles/python/tasks/flatcar.yml no-changed-when -ansible/roles/python/tasks/main.yml name[missing] ansible/roles/python/tasks/main.yml no-changed-when -ansible/roles/security/tasks/trivy.yml jinja[spacing] ansible/roles/setup/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/setup/tasks/azurelinux.yml name[missing] -ansible/roles/setup/tasks/azurelinux.yml package-latest -ansible/roles/setup/tasks/debian.yml command-instead-of-module ansible/roles/setup/tasks/debian.yml no-changed-when -ansible/roles/setup/tasks/debian.yml package-latest 
-ansible/roles/setup/tasks/flatcar.yml name[missing] -ansible/roles/setup/tasks/main.yml name[missing] -ansible/roles/setup/tasks/photon.yml name[missing] ansible/roles/setup/tasks/photon.yml no-changed-when ansible/roles/setup/tasks/redhat.yml command-instead-of-module -ansible/roles/setup/tasks/redhat.yml name[missing] ansible/roles/setup/tasks/redhat.yml no-changed-when -ansible/roles/setup/tasks/redhat.yml package-latest ansible/roles/setup/tasks/rpm_repos.yml no-changed-when ansible/roles/sysprep/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/sysprep/tasks/azurelinux.yml name[missing] ansible/roles/sysprep/tasks/debian.yml no-changed-when ansible/roles/sysprep/tasks/flatcar.yml no-changed-when -ansible/roles/sysprep/tasks/main.yml name[missing] ansible/roles/sysprep/tasks/main.yml no-changed-when -ansible/roles/sysprep/tasks/photon.yml name[missing] ansible/roles/sysprep/tasks/photon.yml no-changed-when ansible/roles/sysprep/tasks/redhat.yml command-instead-of-module -ansible/roles/sysprep/tasks/redhat.yml name[missing] ansible/roles/sysprep/tasks/redhat.yml no-changed-when ansible/roles/sysprep/tasks/rpm_repos.yml no-changed-when ansible/windows/example.vars.yml yaml[line-length] -ansible/windows/roles/cloudbase-init/tasks/main.yml schema[tasks] ansible/windows/roles/debug/defaults/main.yml var-naming[no-role-prefix] -ansible/windows/roles/gmsa/tasks/main.yml name[missing] -ansible/windows/roles/kubernetes/tasks/kubelet.yml name[missing] ansible/windows/roles/kubernetes/tasks/kubelet.yml yaml[line-length] -ansible/windows/roles/kubernetes/tasks/main.yml name[missing] ansible/windows/roles/kubernetes/tasks/sc.yml yaml[line-length] ansible/windows/roles/load_additional_components/defaults/main.yml var-naming[no-role-prefix] -ansible/windows/roles/load_additional_components/tasks/main.yml name[missing] ansible/windows/roles/providers/defaults/main.yml var-naming[no-role-prefix] -ansible/windows/roles/providers/tasks/azure.yml schema[tasks] 
ansible/windows/roles/providers/tasks/azure.yml yaml[line-length] -ansible/windows/roles/providers/tasks/main.yml name[missing] ansible/windows/roles/runtimes/defaults/main.yml var-naming[no-role-prefix] -ansible/windows/roles/runtimes/tasks/main.yml name[missing] ansible/windows/roles/systemprep/defaults/main.yml var-naming[no-role-prefix] ansible/windows/roles/systemprep/tasks/main.yml ignore-errors -ansible/windows/roles/systemprep/tasks/main.yml name[missing] -ansible/windows/roles/systemprep/tasks/ssh-feature.yml schema[tasks] diff --git a/images/capi/Makefile b/images/capi/Makefile index 92711544ee..47d56c5965 100644 --- a/images/capi/Makefile +++ b/images/capi/Makefile @@ -143,7 +143,7 @@ deps-powervs: deps-common deps-ignition: ## Installs/checks dependencies for generating Ignition files deps-ignition: hack/ensure-jq.sh - hack/ensure-ct.sh + hack/ensure-butane.sh .PHONY: deps-nutanix deps-nutanix: ## Installs/checks dependencies for Nutanix builds @@ -212,7 +212,7 @@ IMAGE_NAME ?= cluster-node-image-builder CONTROLLER_IMG ?= $(REGISTRY)/$(IMAGE_NAME) TAG ?= dev ARCH ?= amd64 -BASE_IMAGE ?= docker.io/library/ubuntu:jammy +BASE_IMAGE ?= docker.io/library/ubuntu:24.04 BUILDKIT_SYNTAX ?= docker/dockerfile:1.14 ## -------------------------------------- @@ -339,8 +339,9 @@ PACKER_POWERVS_NODE_FLAGS := $(foreach f,$(abspath $(COMMON_POWERVS_VAR_FILES)), ## -------------------------------------- FLATCAR_VERSIONS := flatcar PHOTON_VERSIONS := photon-4 photon-5 -RHEL_VERSIONS := rhel-8 rhel-9 -ROCKYLINUX_VERSIONS := rockylinux-8 rockylinux-9 +RHEL_VERSIONS := rhel-9 +ROCKYLINUX_VERSIONS := rockylinux-9 +ALMALINUX_VERSIONS := almalinux-9 UBUNTU_VERSIONS := ubuntu-2204 ubuntu-2204-efi ubuntu-2404 ubuntu-2404-efi WINDOWS_VERSIONS := windows-2019 windows-2019-efi windows-2022 windows-2022-efi @@ -356,6 +357,7 @@ export FLATCAR_CHANNEL FLATCAR_VERSION PLATFORMS_AND_VERSIONS := $(PHOTON_VERSIONS) \ $(RHEL_VERSIONS) \ $(ROCKYLINUX_VERSIONS) \ + $(ALMALINUX_VERSIONS) \ 
$(UBUNTU_VERSIONS) \ $(FLATCAR_VERSIONS) \ $(WINDOWS_VERSIONS) @@ -367,9 +369,9 @@ NODE_OVA_VSPHERE_BUILD_NAMES := $(addprefix node-ova-vsphere-,$(PLATFORMS_AND_V NODE_OVA_VSPHERE_BASE_BUILD_NAMES := $(addprefix node-ova-vsphere-base-,$(PLATFORMS_AND_VERSIONS)) NODE_OVA_VSPHERE_CLONE_BUILD_NAMES := $(addprefix node-ova-vsphere-clone-,$(PLATFORMS_AND_VERSIONS)) -AMI_BUILD_NAMES ?= ami-ubuntu-2204 ami-ubuntu-2404 ami-amazon-2 ami-amazon-2023 ami-flatcar ami-flatcar-arm64 ami-windows-2019 ami-rockylinux-8 ami-rhel-8 +AMI_BUILD_NAMES ?= ami-ubuntu-2204 ami-ubuntu-2404 ami-amazon-2 ami-amazon-2023 ami-flatcar ami-flatcar-arm64 ami-windows-2019 HUAWEICLOUD_BUILD_NAMES ?= huaweicloud-ubuntu-2204 -GCE_BUILD_NAMES ?= gce-ubuntu-2204 gce-ubuntu-2404 gce-rhel-8 +GCE_BUILD_NAMES ?= gce-ubuntu-2204 gce-ubuntu-2404 # Make needs these lists to be space delimited, no quotes VHD_TARGETS := $(shell grep VHD_TARGETS azure_targets.sh | sed 's/VHD_TARGETS=//' | tr -d \") @@ -381,7 +383,7 @@ AZURE_BUILD_SIG_NAMES ?= $(addprefix azure-sig-,$(SIG_TARGETS)) AZURE_BUILD_SIG_GEN2_NAMES ?= $(addsuffix -gen2,$(addprefix azure-sig-,$(SIG_GEN2_TARGETS))) AZURE_BUILD_SIG_CVM_NAMES ?= $(addsuffix -cvm,$(addprefix azure-sig-,$(SIG_CVM_TARGETS))) -OCI_BUILD_NAMES ?= oci-ubuntu-2204 oci-oracle-linux-8 oci-oracle-linux-9 oci-windows-2019 oci-windows-2022 +OCI_BUILD_NAMES ?= oci-ubuntu-2204 oci-oracle-linux-9 oci-windows-2019 oci-windows-2022 DO_BUILD_NAMES ?= do-ubuntu-2204 do-ubuntu-2404 @@ -389,17 +391,17 @@ OPENSTACK_BUILD_NAMES ?= openstack-ubuntu-2204 openstack-ubuntu-2404 openstack- OSC_BUILD_NAMES ?= osc-ubuntu-2204 osc-ubuntu-2404 -QEMU_BUILD_NAMES ?= qemu-ubuntu-2204 qemu-ubuntu-2204-cloudimg qemu-ubuntu-2404 qemu-ubuntu-2404-efi qemu-ubuntu-2204-efi qemu-centos-9 qemu-rhel-8 qemu-rhel-9 qemu-rockylinux-8 qemu-rockylinux-8-cloudimg qemu-rockylinux-9 qemu-rockylinux-9-cloudimg qemu-flatcar +QEMU_BUILD_NAMES ?= qemu-ubuntu-2204 qemu-ubuntu-2204-cloudimg qemu-ubuntu-2404 qemu-ubuntu-2404-efi 
qemu-ubuntu-2204-efi qemu-centos-9 qemu-rhel-9 qemu-rockylinux-9 qemu-rockylinux-9-cloudimg qemu-flatcar QEMU_KUBEVIRT_BUILD_NAMES := $(addprefix kubevirt-,$(QEMU_BUILD_NAMES)) -RAW_BUILD_NAMES ?= raw-ubuntu-2204 raw-ubuntu-2204-efi raw-ubuntu-2404 raw-ubuntu-2404-efi raw-flatcar raw-rhel-8 raw-rhel-9 raw-rhel-9-efi +RAW_BUILD_NAMES ?= raw-ubuntu-2204 raw-ubuntu-2204-efi raw-ubuntu-2404 raw-ubuntu-2404-efi raw-flatcar raw-rhel-9 raw-rhel-9-efi -POWERVS_BUILD_NAMES ?= powervs-centos-8 powervs-centos-9 +POWERVS_BUILD_NAMES ?= powervs-centos-9 -NUTANIX_BUILD_NAMES ?= nutanix-ubuntu-2204 nutanix-ubuntu-2404 nutanix-rhel-8 nutanix-rhel-9 nutanix-rockylinux-8 nutanix-rockylinux-9 nutanix-flatcar nutanix-windows-2022 +NUTANIX_BUILD_NAMES ?= nutanix-ubuntu-2204 nutanix-ubuntu-2404 nutanix-rhel-9 nutanix-rockylinux-9 nutanix-flatcar nutanix-windows-2022 -HCLOUD_BUILD_NAMES ?= hcloud-ubuntu-2204 hcloud-ubuntu-2404 hcloud-rockylinux-8 hcloud-rockylinux-9 hcloud-flatcar hcloud-flatcar-arm64 +HCLOUD_BUILD_NAMES ?= hcloud-ubuntu-2204 hcloud-ubuntu-2404 hcloud-rockylinux-9 hcloud-flatcar hcloud-flatcar-arm64 PROXMOX_BUILD_NAMES ?= proxmox-ubuntu-2204 proxmox-ubuntu-2404 proxmox-ubuntu-2404-efi proxmox-rockylinux-9 proxmox-flatcar @@ -698,8 +700,6 @@ build-ami-amazon-2: ## Builds Amazon-2 Linux AMI build-ami-amazon-2023: ## Builds Amazon-2023 Linux AMI build-ami-ubuntu-2204: ## Builds Ubuntu 22.04 AMI build-ami-ubuntu-2404: ## Builds Ubuntu 24.04 AMI -build-ami-rockylinux-8: ## Builds RockyLinux 8 AMI -build-ami-rhel-8: ## Builds RHEL-8 AMI build-ami-flatcar: ## Builds Flatcar build-ami-flatcar-arm64: ## Builds Flatcar arm64 build-ami-windows-2019: ## Build Windows Server 2019 AMI Packer config @@ -708,7 +708,6 @@ build-ami-all: $(AMI_BUILD_TARGETS) ## Builds all AMIs build-azure-sig-ubuntu-2204: ## Builds Ubuntu 22.04 Azure managed image in Shared Image Gallery build-azure-sig-ubuntu-2404: ## Builds Ubuntu 24.04 Azure managed image in Shared Image Gallery 
build-azure-sig-azurelinux-3: ## Builds Azure Linux 3 Azure managed image in Shared Image Gallery -build-azure-sig-rhel-8: ## Builds RHEL 8 Azure managed image in Shared Image Gallery build-azure-sig-windows-2019-containerd: ## Builds Windows Server 2019 with containerd Azure managed image in Shared Image Gallery build-azure-sig-windows-2022-containerd: ## Builds Windows Server 2022 with containerd Azure managed image in Shared Image Gallery build-azure-sig-windows-2025-containerd: ## Builds Windows Server 2025 with containerd Azure managed image in Shared Image Gallery @@ -717,7 +716,6 @@ build-azure-sig-windows-2022-containerd-cvm: ## Builds Windows Server 2022 with build-azure-vhd-ubuntu-2204: ## Builds Ubuntu 22.04 VHD image for Azure build-azure-vhd-ubuntu-2404: ## Builds Ubuntu 24.04 VHD image for Azure build-azure-vhd-azurelinux-3: ## Builds Azure Linux 3 VHD image for Azure -build-azure-vhd-rhel-8: ## Builds RHEL 8 VHD image for Azure build-azure-vhd-windows-2019-containerd: ## Builds for Windows Server 2019 with containerd build-azure-vhd-windows-2022-containerd: ## Builds for Windows Server 2022 with containerd build-azure-sig-windows-annual-containerd: ## Builds for Windows Server Annual Channel with containerd @@ -737,7 +735,6 @@ build-do-all: $(DO_BUILD_TARGETS) ## Builds all DigitalOcean Snapshot build-gce-ubuntu-2204: ## Builds the GCE ubuntu-2204 image build-gce-ubuntu-2404: ## Builds the GCE ubuntu-2404 image -build-gce-rhel-8: ## Builds the GCE rhel-8 image build-gce-all: $(GCE_BUILD_TARGETS) ## Builds all GCE image build-huaweicloud-ubuntu-2204: ## Builds Ubuntu 22.04 HuaweiCloud image @@ -746,20 +743,18 @@ build-huaweicloud-all: $(HUAWEICLOUD_BUILD_TARGETS) ## Builds all HuaweiCloud im build-node-ova-local-flatcar: ## Builds Flatcar stable Node OVA w local hypervisor build-node-ova-local-photon-4: ## Builds Photon 4 Node OVA w local hypervisor build-node-ova-local-photon-5: ## Builds Photon 5 Node OVA w local hypervisor 
-build-node-ova-local-rhel-8: ## Builds RHEL 8 Node OVA w local hypervisor build-node-ova-local-rhel-9: ## Builds RHEL 9 Node OVA w local hypervisor -build-node-ova-local-rockylinux-8: ## Builds RockyLinux 8 Node OVA w local hypervisor build-node-ova-local-rockylinux-9: ## Builds RockyLinux 9 Node OVA w local hypervisor +build-node-ova-local-almalinux-9: ## Builds AlmaLinux 9 Node OVA w local hypervisor build-node-ova-local-windows-2019: ## Builds for Windows Server 2019 Node OVA w local hypervisor build-node-ova-local-all: $(NODE_OVA_LOCAL_BUILD_TARGETS) ## Builds all Node OVAs w local hypervisor build-node-ova-vsphere-flatcar: ## Builds Flatcar stable Node OVA and template on vSphere build-node-ova-vsphere-photon-4: ## Builds Photon 4 Node OVA and template on vSphere build-node-ova-vsphere-photon-5: ## Builds Photon 5 Node OVA and template on vSphere -build-node-ova-vsphere-rhel-8: ## Builds RHEL 8 Node OVA and template on vSphere build-node-ova-vsphere-rhel-9: ## Builds RHEL 9 Node OVA and template on vSphere -build-node-ova-vsphere-rockylinux-8: ## Builds RockyLinux 8 Node OVA and template on vSphere build-node-ova-vsphere-rockylinux-9: ## Builds RockyLinux 9 Node OVA and template on vSphere +build-node-ova-vsphere-almalinux-9: ## Builds AlmaLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-ubuntu-2204: ## Builds Ubuntu 22.04 Node OVA and template on vSphere build-node-ova-vsphere-ubuntu-2204-efi: ## Builds Ubuntu 22.04 Node OVA and template on vSphere that EFI boots build-node-ova-vsphere-ubuntu-2404: ## Builds Ubuntu 24.04 Node OVA and template on vSphere @@ -772,10 +767,9 @@ build-node-ova-vsphere-all: $(NODE_OVA_VSPHERE_BUILD_TARGETS) ## Builds all Node build-node-ova-vsphere-clone-photon-4: ## Builds Photon 4 Node OVA and template on vSphere build-node-ova-vsphere-clone-photon-5: ## Builds Photon 5 Node OVA and template on vSphere -build-node-ova-vsphere-clone-rhel-8: ## Builds RHEL 8 Node OVA and template on vSphere 
build-node-ova-vsphere-clone-rhel-9: ## Builds RHEL 9 Node OVA and template on vSphere -build-node-ova-vsphere-clone-rockylinux-8: ## Builds RockyLinux 8 Node OVA and template on vSphere build-node-ova-vsphere-clone-rockylinux-9: ## Builds RockyLinux 9 Node OVA and template on vSphere +build-node-ova-vsphere-clone-almalinux-9: ## Builds AlmaLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-clone-ubuntu-2204: ## Builds Ubuntu 22.04 Node OVA and template on vSphere build-node-ova-vsphere-clone-ubuntu-2204-efi: ## ## Builds Ubuntu 22.04 Node OVA and template on vSphere that EFI boots build-node-ova-vsphere-clone-ubuntu-2404: ## Builds Ubuntu 24.04 Node OVA and template on vSphere @@ -784,10 +778,9 @@ build-node-ova-vsphere-clone-all: $(NODE_OVA_VSPHERE_CLONE_BUILD_TARGETS) ## Bui build-node-ova-vsphere-base-photon-4: ## Builds base Photon 4 Node OVA and template on vSphere build-node-ova-vsphere-base-photon-5: ## Builds base Photon 5 Node OVA and template on vSphere -build-node-ova-vsphere-base-rhel-8: ## Builds base RHEL 8 Node OVA and template on vSphere build-node-ova-vsphere-base-rhel-9: ## Builds base RHEL 9 Node OVA and template on vSphere -build-node-ova-vsphere-base-rockylinux-8: ## Builds base RockyLinux 8 Node OVA and template on vSphere build-node-ova-vsphere-base-rockylinux-9: ## Builds base RockyLinux 9 Node OVA and template on vSphere +build-node-ova-vsphere-base-almalinux-9: ## Builds base AlmaLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-base-ubuntu-2204: ## Builds base Ubuntu 22.04 Node OVA and template on vSphere build-node-ova-vsphere-base-ubuntu-2204-efi: ## Builds Ubuntu 22.04 Node OVA and template on vSphere that EFI boots build-node-ova-vsphere-base-ubuntu-2404: ## Builds base Ubuntu 24.04 Node OVA and template on vSphere @@ -796,17 +789,15 @@ build-node-ova-vsphere-base-all: $(NODE_OVA_VSPHERE_BASE_BUILD_TARGETS) ## Build build-node-ova-local-vmx-photon-4: ## Builds Photon 4 Node OVA from VMX file w local 
hypervisor build-node-ova-local-vmx-photon-5: ## Builds Photon 5 Node OVA from VMX file w local hypervisor -build-node-ova-local-vmx-rhel-8: ## Builds RHEL 8 Node OVA from VMX file w local hypervisor build-node-ova-local-vmx-rhel-9: ## Builds RHEL 9 Node OVA from VMX file w local hypervisor -build-node-ova-local-vmx-rockylinux-8: ## Builds RockyLinux 8 Node OVA from VMX file w local hypervisor build-node-ova-local-vmx-rockylinux-9: ## Builds RockyLinux 9 Node OVA from VMX file w local hypervisor +build-node-ova-local-vmx-almalinux-9: ## Builds AlmaLinux 9 Node OVA from VMX file w local hypervisor build-node-ova-local-base-photon-4: ## Builds Photon 4 Base Node OVA w local hypervisor build-node-ova-local-base-photon-5: ## Builds Photon 5 Base Node OVA w local hypervisor -build-node-ova-local-base-rhel-8: ## Builds RHEL 8 Base Node OVA w local hypervisor build-node-ova-local-base-rhel-9: ## Builds RHEL 9 Base Node OVA w local hypervisor -build-node-ova-local-base-rockylinux-8: ## Builds RockyLinux 8 Base Node OVA w local hypervisor -build-node-ova-local-base-rockylinux-9: ## Builds RockyLinux 9 Base Node OVA w local hypervisor +build-node-ova-local-base-rockylinux-9: ## Builds RockyLinux 9 Base Node OVA w local hypervisor +build-node-ova-local-base-almalinux-9: ## Builds AlmaLinux 9 Base Node OVA w local hypervisor build-openstack-ubuntu-2204: ## Builds Ubuntu 22.04 OpenStack image build-openstack-ubuntu-2404: ## Builds Ubuntu 24.04 OpenStack image @@ -821,10 +812,7 @@ build-qemu-ubuntu-2204-efi: ## Builds Ubuntu 22.04 QEMU image that EFI boots build-qemu-ubuntu-2404: ## Builds Ubuntu 24.04 QEMU image build-qemu-ubuntu-2404-efi: ## Builds Ubuntu 24.04 QEMU image that EFI boots build-qemu-centos-9: ## Builds CentOS 9 Stream QEMU image -build-qemu-rhel-8: ## Builds RHEL 8 QEMU image build-qemu-rhel-9: ## Builds RHEL 9 QEMU image -build-qemu-rockylinux-8: ## Builds Rocky 8 QEMU image -build-qemu-rockylinux-8-cloudimg: ## Builds Rocky 8 QEMU image using cloud
image build-qemu-rockylinux-9: ## Builds Rocky 9 QEMU image build-qemu-rockylinux-9-cloudimg: ## Builds Rocky 9 QEMU image using cloud image build-qemu-all: $(QEMU_BUILD_TARGETS) ## Builds all Qemu images @@ -837,13 +825,11 @@ build-raw-ubuntu-2204: ## Builds Ubuntu 22.04 RAW image build-raw-ubuntu-2204-efi: ## Builds Ubuntu 22.04 RAW image that EFI boots build-raw-ubuntu-2404: ## Builds Ubuntu 24.04 RAW image build-raw-ubuntu-2404-efi: ## Builds Ubuntu 24.04 RAW image that EFI boots -build-raw-rhel-8: ## Builds RHEL 8 RAW image build-raw-rhel-9: ## Builds RHEL 9 RAW image build-raw-rhel-9-efi: ## Builds RHEL 9 RAW image that EFI boots build-raw-all: $(RAW_BUILD_TARGETS) ## Builds all RAW images build-oci-ubuntu-2204: ## Builds the OCI ubuntu-2204 image -build-oci-oracle-linux-8: ## Builds the OCI Oracle Linux 8.x image build-oci-oracle-linux-9: ## Builds the OCI Oracle Linux 9.x image build-oci-windows-2019: ## Builds the OCI Windows Server 2019 image build-oci-windows-2022: ## Builds the OCI Windows Server 2022 image @@ -853,37 +839,36 @@ build-osc-ubuntu-2204: ## Builds Ubuntu 22.04 Outscale Snapshot build-osc-ubuntu-2404: ## Builds Ubuntu 24.04 Outscale Snapshot build-osc-all: $(OSC_BUILD_TARGETS) ## Builds all Outscale Snapshot -build-nutanix-ubuntu-2204: ## Builds the Nutanix ubuntu-2204 image -build-nutanix-ubuntu-2404: ## Builds the Nutanix ubuntu-2404 image -build-nutanix-rhel-8: ## Builds the Nutanix RedHat Enterprise Linux 8 image -build-nutanix-rhel-9: ## Builds the Nutanix edHat Enterprise Linux 9 image -build-nutanix-rockylinux-8: ## Builds the Nutanix Rocky Linux 8 image +build-nutanix-ubuntu-2204: ## Builds Ubuntu 22.04 Nutanix image +build-nutanix-ubuntu-2404: ## Builds Ubuntu 24.04 Nutanix image +build-nutanix-rhel-9: ## Builds the Nutanix RedHat Enterprise Linux 9 image build-nutanix-rockylinux-9: ## Builds the Nutanix Rocky Linux 9 image build-nutanix-flatcar: ## Builds the Nutanix Flatcar image build-nutanix-windows-2022: ## Builds the Nutanix 
Windows 2022 image build-nutanix-all: $(NUTANIX_BUILD_TARGETS) ## Builds all Nutanix image -build-hcloud-ubuntu-2204: ## Builds the Hetzner Cloud ubuntu-2204 image -build-hcloud-ubuntu-2404: ## Builds the Hetzner Cloud ubuntu-2404 image -build-hcloud-rockylinux-8: ## Builds the Hetzner Cloud Rocky Linux 8 image +build-hcloud-ubuntu-2204: ## Builds the Hetzner Cloud Ubuntu 22.04 image +build-hcloud-ubuntu-2404: ## Builds the Hetzner Cloud Ubuntu 24.04 image build-hcloud-rockylinux-9: ## Builds the Hetzner Cloud Rocky Linux 9 image build-hcloud-flatcar: ## Builds the Hetzner Cloud Flatcar image build-hcloud-flatcar-arm64: ## Builds the Hetzner Cloud Flatcar arm64 image build-hcloud-all: $(HCLOUD_BUILD_TARGETS) ## Builds all Hetzner Cloud image +build-proxmox-ubuntu-2204: ## Builds Ubuntu 22.04 Proxmox image +build-proxmox-ubuntu-2404: ## Builds Ubuntu 24.04 Proxmox image +build-proxmox-ubuntu-2404-efi: ## Builds Ubuntu 24.04 Proxmox image that EFI boots +build-proxmox-rockylinux-9: ## Builds Rocky Linux 9 Proxmox image build-proxmox-flatcar: ## Builds Flatcar Proxmox image -build-proxmox-ubuntu-2204: ## Builds the Proxmox ubuntu-2204 image -build-proxmox-ubuntu-2404: ## Builds the Proxmox ubuntu-2404 image -build-proxmox-ubuntu-2404-efi: ## Builds the Proxmox ubuntu-2404-efi image that EFI boots -build-proxmox-rockylinux-9: ## Builds the Proxmox rockylinux-9 image +build-proxmox-all: $(PROXMOX_BUILD_TARGETS) ## Builds all Proxmox images build-vultr-ubuntu-2204: ## Builds Ubuntu 22.04 Vultr Snapshot build-vultr-ubuntu-2404: ## Builds Ubuntu 24.04 Vultr Snapshot +build-vultr-all: $(VULTR_BUILD_TARGETS) ## Builds all Vultr Snapshots -build-scaleway-rockylinux-9: ## Builds the Scaleway rockylinux-9 image +build-scaleway-rockylinux-9: ## Builds Rocky Linux 9 Scaleway image build-scaleway-ubuntu-2204: ## Builds Ubuntu 22.04 Scaleway image build-scaleway-ubuntu-2404: ## Builds Ubuntu 24.04 Scaleway image -build-scaleway-all: $(SCALEWAY_BUILD_TARGETS) ## Builds all Scaleway 
Cloud images +build-scaleway-all: $(SCALEWAY_BUILD_TARGETS) ## Builds all Scaleway images ## -------------------------------------- ## Document dynamic validate targets @@ -891,19 +876,17 @@ build-scaleway-all: $(SCALEWAY_BUILD_TARGETS) ## Builds all Scaleway Cloud image ##@ Validate packer config validate-ami-amazon-2: ## Validates Amazon-2 Linux AMI Packer config validate-ami-amazon-2023: ## Validates Amazon-2023 Linux AMI Packer config -validate-ami-rockylinux-8: ## Validates RockyLinux 8 AMI Packer config -validate-ami-rhel-8: ## Validates RHEL-8 AMI Packer config validate-ami-flatcar: ## Validates Flatcar AMI Packer config validate-ami-flatcar-arm64: ## Validates Flatcar arm64 AMI Packer config validate-ami-ubuntu-2204: ## Validates Ubuntu 22.04 AMI Packer config -validate-ami-ubuntu-2404: ## Validates Ubuntu 22.04 AMI Packer config + +validate-ami-ubuntu-2404: ## Validates Ubuntu 24.04 AMI Packer config validate-ami-windows-2019: ## Validates Windows Server 2019 AMI Packer config validate-ami-all: $(AMI_VALIDATE_TARGETS) ## Validates all AMIs Packer config validate-huaweicloud-ubuntu-2204: ## Validates Ubuntu 22.04 HuaweiCloud Snapshot Packer config validate-azure-sig-azurelinux-3: ## Validates Azure Linux 3 Azure managed image in Shared Image Gallery Packer config -validate-azure-sig-rhel-8: ## Validates RHEL 8 Azure managed image in Shared Image Gallery Packer config validate-azure-sig-ubuntu-2204: ## Validates Ubuntu 22.04 Azure managed image in Shared Image Gallery Packer config validate-azure-sig-ubuntu-2404: ## Validates Ubuntu 24.04 Azure managed image in Shared Image Gallery Packer config validate-azure-sig-windows-2019-containerd: ## Validate Windows Server 2019 with containerd Azure managed image in Shared Image Gallery Packer config @@ -911,7 +894,6 @@ validate-azure-sig-windows-2022-containerd: ## Validate Windows Server 2022 with validate-azure-sig-windows-2025-containerd: ## Validate Windows Server 2025 with containerd Azure managed image in 
Shared Image Gallery Packer config validate-azure-sig-windows-annual-containerd: ## Validate Windows Server Annual Channel with containerd Azure managed image in Shared Image Gallery Packer config validate-azure-vhd-azurelinux-3: ## Validates Azure Linux 3 VHD image Azure Packer config -validate-azure-vhd-rhel-8: ## Validates RHEL 8 VHD image Azure Packer config validate-azure-vhd-ubuntu-2204: ## Validates Ubuntu 22.04 VHD image Azure Packer config validate-azure-vhd-ubuntu-2404: ## Validates Ubuntu 24.04 VHD image Azure Packer config validate-azure-vhd-windows-2019-containerd: ## Validate Windows Server 2019 VHD with containerd image Azure Packer config @@ -935,16 +917,14 @@ validate-openstack-all: $(OPENSTACK_VALIDATE_TARGETS) ## Validates all Openstack validate-gce-ubuntu-2204: ## Validates Ubuntu 22.04 GCE Snapshot Packer config validate-gce-ubuntu-2404: ## Validates Ubuntu 24.04 GCE Snapshot Packer config -validate-gce-rhel-8: ## Validates RHEL 8 GCE Snapshot Packer config validate-gce-all: $(GCE_VALIDATE_TARGETS) ## Validates all GCE Snapshot Packer config validate-node-ova-local-flatcar: ## Validates Flatcar stable Node OVA Packer config w local hypervisor validate-node-ova-local-photon-4: ## Validates Photon 4 Node OVA Packer config w local hypervisor validate-node-ova-local-photon-5: ## Validates Photon 5 Node OVA Packer config w local hypervisor -validate-node-ova-local-rhel-8: ## Validates RHEL 8 Node OVA Packer config w local hypervisor validate-node-ova-local-rhel-9: ## Validates RHEL 9 Node OVA Packer config w local hypervisor -validate-node-ova-local-rockylinux-8: ## Validates RockyLinux 8 Node OVA Packer config w local hypervisor validate-node-ova-local-rockylinux-9: ## Validates RockyLinux 9 Node OVA Packer config w local hypervisor +validate-node-ova-local-almalinux-9: ## Validates AlmaLinux 9 Node OVA Packer config w local hypervisor validate-node-ova-local-ubuntu-2204: ## Validates Ubuntu 22.04 Node OVA Packer config w local hypervisor 
validate-node-ova-local-ubuntu-2404: ## Validates Ubuntu 24.04 Node OVA Packer config w local hypervisor validate-node-ova-local-windows-2019: ## Validates Windows Server 2019 Node OVA Packer config w local hypervisor @@ -953,19 +933,17 @@ validate-node-ova-local-all: $(NODE_OVA_LOCAL_VALIDATE_TARGETS) ## Validates all validate-node-ova-local-vmx-photon-4: ## Validates Photon 4 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-photon-5: ## Validates Photon 5 Node OVA from VMX file w local hypervisor -validate-node-ova-local-vmx-rhel-8: ## Validates RHEL 8 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-rhel-9: ## Validates RHEL 9 Node OVA from VMX file w local hypervisor -validate-node-ova-local-vmx-rockylinux-8: ## Validates RockyLinux 8 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-rockylinux-9: ## Validates RockyLinux 9 Node OVA from VMX file w local hypervisor +validate-node-ova-local-vmx-almalinux-9: ## Validates AlmaLinux 9 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-ubuntu-2204: ## Validates Ubuntu 22.04 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-ubuntu-2404: ## Validates Ubuntu 24.04 Node OVA from VMX file w local hypervisor validate-node-ova-local-base-photon-4: ## Validates Photon 4 Base Node OVA w local hypervisor validate-node-ova-local-base-photon-5: ## Validates Photon 5 Base Node OVA w local hypervisor -validate-node-ova-local-base-rhel-8: ## Validates RHEL 8 Base Node OVA w local hypervisor validate-node-ova-local-base-rhel-9: ## Validates RHEL 9 Base Node OVA w local hypervisor -validate-node-ova-local-base-rockylinux-8: ## Validates RockyLinux 8 Base Node OVA w local hypervisor validate-node-ova-local-base-rockylinux-9: ## Validates RockyLinux 9 Base Node OVA w local hypervisor +validate-node-ova-local-base-almalinux-9: ## Validates AlmaLinux 9 Base Node OVA w local hypervisor validate-node-ova-local-base-ubuntu-2204: ## Validates 
Ubuntu 22.04 Base Node OVA w local hypervisor validate-node-ova-local-base-ubuntu-2404: ## Validates Ubuntu 24.04 Base Node OVA w local hypervisor @@ -975,10 +953,7 @@ validate-qemu-ubuntu-2204-cloudimg: ## Validates Ubuntu 22.04 QEMU image packer validate-qemu-ubuntu-2204-efi: ## Validates Ubuntu 22.04 QEMU EFI image packer config validate-qemu-ubuntu-2404: ## Validates Ubuntu 24.04 QEMU image packer config validate-qemu-ubuntu-2404-efi: ## Validates Ubuntu 24.04 QEMU EFI image packer config -validate-qemu-rhel-8: ## Validates RHEL 8 QEMU image validate-qemu-rhel-9: ## Validates RHEL 9 QEMU image -validate-qemu-rockylinux-8: ## Validates Rocky Linux 8 QEMU image packer config -validate-qemu-rockylinux-8-cloudimg: ## Validates Rocky Linux 8 QEMU image packer config using cloud image validate-qemu-rockylinux-9: ## Validates Rocky Linux 9 QEMU image packer config validate-qemu-rockylinux-9-cloudimg: ## Validates Rocky Linux 9 QEMU image packer config using cloud image validate-qemu-all: $(QEMU_VALIDATE_TARGETS) ## Validates all Qemu Packer config @@ -988,11 +963,10 @@ validate-raw-ubuntu-2204: ## Validates Ubuntu 22.04 RAW image packer config validate-raw-ubuntu-2204-efi: ## Validates Ubuntu 22.04 RAW EFI image packer config validate-raw-ubuntu-2404: ## Validates Ubuntu 24.04 RAW image packer config validate-raw-ubuntu-2404-efi: ## Validates Ubuntu 24.04 RAW EFI image packer config -validate-raw-rhel-8: ## Validates RHEL 8 RAW image packer config +validate-raw-rhel-9: ## Validates RHEL 9 RAW image packer config validate-raw-all: $(RAW_VALIDATE_TARGETS) ## Validates all RAW Packer config validate-oci-ubuntu-2204: ## Validates the OCI ubuntu-2204 image packer config -validate-oci-oracle-linux-8: ## Validates the OCI Oracle Linux 8.x image packer config validate-oci-oracle-linux-9: ## Validates the OCI Oracle Linux 9.x image packer config validate-oci-windows-2019: ## Validates the OCI Windows 2019 image packer config validate-oci-windows-2022: ## Validates the OCI 
Windows 2022 image packer config @@ -1002,15 +976,12 @@ validate-osc-ubuntu-2204: ## Validates Ubuntu 22.04 Outscale Snapshot Packer con validate-osc-ubuntu-2404: ## Validates Ubuntu 24.04 Outscale Snapshot Packer config validate-osc-all: $(OSC_VALIDATE_TARGETS) ## Validates all Outscale Snapshot Packer config -validate-powervs-centos-8: ## Validates the PowerVS CentOS 8 image packer config validate-powervs-centos-9: ## Validates the PowerVS CentOS 9 image packer config validate-powervs-all: $(POWERVS_VALIDATE_TARGETS) ## Validates all PowerVS Packer config validate-nutanix-ubuntu-2204: ## Validates Ubuntu 22.04 Nutanix Packer config validate-nutanix-ubuntu-2404: ## Validates Ubuntu 24.04 Nutanix Packer config -validate-nutanix-rhel-8: ## Validates RedHat Enterprise Linux 8 Nutanix Packer config validate-nutanix-rhel-9: ## Validates RedHat Enterprise Linux 9 Nutanix Packer config -validate-nutanix-rockylinux-8: ## Validates Rocky Linux 8 Nutanix Packer config validate-nutanix-rockylinux-9: ## Validates Rocky Linux 9 Nutanix Packer config validate-nutanix-flatcar: ## Validates the Nutanix Flatcar Nutanix Packer config validate-nutanix-windows-2022: ## Validates Windows Server 2022 Nutanix Packer config @@ -1018,17 +989,21 @@ validate-nutanix-all: $(NUTANIX_VALIDATE_TARGETS) ## Validates all Nutanix Packe validate-hcloud-ubuntu-2204: ## Validates Ubuntu 22.04 Hetzner Cloud Packer config validate-hcloud-ubuntu-2404: ## Validates Ubuntu 24.04 Hetzner Cloud Packer config -validate-hcloud-rockylinux-8: ## Validates Rocky Linux 8 Hetzner Cloud Packer config -validate-hcloud-rockylinux-9: ## Validates the Hetzner Cloud Rocky Linux 9 Packer config +validate-hcloud-rockylinux-9: ## Validates Rocky Linux 9 Hetzner Cloud Packer config validate-hcloud-flatcar: ## Validates the Hetzner Cloud Flatcar Packer config validate-hcloud-flatcar-arm64: ## Validates the Hetzner Cloud Flatcar arm64 Packer config validate-hcloud-all: $(HCLOUD_VALIDATE_TARGETS) ## Validates all Hetzner Cloud 
Packer config validate-proxmox-ubuntu-2204: ## Validates Ubuntu 22.04 Proxmox Packer config +validate-proxmox-ubuntu-2404: ## Validates Ubuntu 24.04 Proxmox Packer config +validate-proxmox-ubuntu-2404-efi: ## Validates Ubuntu 24.04 EFI Proxmox Packer config +validate-proxmox-rockylinux-9: ## Validates Rocky Linux 9 Proxmox Packer config validate-proxmox-flatcar: ## Validates Flatcar Proxmox Packer config +validate-proxmox-all: $(PROXMOX_VALIDATE_TARGETS) ## Validates all Proxmox Packer config validate-vultr-ubuntu-2204: ## Validates Ubuntu 22.04 Vultr Snapshot Packer config validate-vultr-ubuntu-2404: ## Validates Ubuntu 24.04 Vultr Snapshot Packer config +validate-vultr-all: $(VULTR_VALIDATE_TARGETS) ## Validates all Vultr Snapshot Packer config validate-scaleway-rockylinux-9: ## Validates Rocky Linux 9 Scaleway image Packer config validate-scaleway-ubuntu-2204: ## Validates Ubuntu 22.04 Scaleway image Packer config @@ -1044,10 +1019,10 @@ validate-all: validate-ami-all \ validate-raw-all \ validate-oci-all \ validate-osc-all \ - validate-powervs-all \ validate-nutanix-all \ validate-hcloud-all \ - validate-scaleway-all + validate-scaleway-all \ + validate-powervs-all validate-all: ## Validates the Packer config for all build targets @@ -1055,19 +1030,19 @@ validate-all: ## Validates the Packer config for all build targets lint: ## Runs linters on image-builder code sh_files = $(shell find . -type f -name "*.sh") lint: deps-lint - ansible-lint ansible/ + ansible-lint --project-dir . ansible/ # ignore error code since shellcheck exits with Error 1 if problems are found despite running properly -@for f in $(sh_files); do (shellcheck -x $$f); done .PHONY: lint-fix lint-fix: ## Runs linters on image-builder code and fixes issues lint-fix: deps-lint - ansible-lint --fix=all ansible/ + ansible-lint --fix=all --project-dir . 
ansible/ .PHONY: lint-ignore lint-ignore: ## Runs linters on image-builder code and creates an "ignore" file lint-ignore: deps-lint - ansible-lint --generate-ignore ansible/ + ansible-lint --generate-ignore --project-dir . ansible/ ## -------------------------------------- ## Clean targets @@ -1155,7 +1130,7 @@ json-sort: ## Sort all JSON files alphabetically .PHONY: gen-ignition ignition_files = bootstrap-pass-auth bootstrap-cloud gen-ignition: deps-ignition ## Generates Ignition files from CLC - for f in $(ignition_files); do (ct < packer/files/flatcar/clc/$$f.yaml | jq '.' > packer/files/flatcar/ignition/$$f.json) || exit 1; done + for f in $(ignition_files); do (butane --pretty --strict < packer/files/flatcar/clc/$$f.yaml | jq '.' > packer/files/flatcar/ignition/$$f.json) || exit 1; done ## -------------------------------------- ## ISO checksum updates diff --git a/images/capi/ansible.cfg b/images/capi/ansible.cfg index b086976e19..48c6d1e646 100644 --- a/images/capi/ansible.cfg +++ b/images/capi/ansible.cfg @@ -15,6 +15,7 @@ [defaults] remote_tmp = /tmp/.ansible display_skipped_hosts = False +inject_facts_as_vars = False [ssh_connection] pipelining = False diff --git a/images/capi/ansible/firstboot.yml b/images/capi/ansible/firstboot.yml index 779383808c..bbaf133248 100644 --- a/images/capi/ansible/firstboot.yml +++ b/images/capi/ansible/firstboot.yml @@ -28,22 +28,26 @@ ansible.builtin.raw: test -e /usr/bin/apt && (apt-get update && apt-get install -y python3) || (yum install -y python3) when: python_installed.rc != 0 -- hosts: all +- name: Run firstboot roles + hosts: all become: true vars: firstboot_custom_roles_pre: "" firstboot_custom_roles_post: "" tasks: - - ansible.builtin.include_role: + - name: Include pre-firstboot custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ firstboot_custom_roles_pre.split() }}" loop_control: loop_var: role when: firstboot_custom_roles_pre != "" - - ansible.builtin.include_role: + - name: Include 
firstboot role + ansible.builtin.include_role: name: firstboot - - ansible.builtin.include_role: + - name: Include post-firstboot custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ firstboot_custom_roles_post.split() }}" loop_control: diff --git a/images/capi/ansible/node.yml b/images/capi/ansible/node.yml index ab79237e14..09e0c2c804 100644 --- a/images/capi/ansible/node.yml +++ b/images/capi/ansible/node.yml @@ -12,46 +12,57 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- hosts: all +- name: Provision node + hosts: all become: true vars: node_custom_roles_pre: "" node_custom_roles_post: "" custom_role_names: "" system: linux - arch_uname: "{{ ansible_architecture }}" - arch: "{{ 'amd64' if arch_uname in ['x86_64', 'amd64'] else 'arm64' if arch_uname in ['aarch64', 'arm64'] else 'unsupported' }}" + arch_uname: "{{ ansible_facts['architecture'] }}" + arch: "{{ {'x86_64': 'amd64', 'amd64': 'amd64', 'aarch64': 'arm64', 'arm64': 'arm64', 'ppc64le': 'ppc64le'}.get(arch_uname, 'unsupported') }}" tasks: - - ansible.builtin.include_role: + - name: Include pre-node custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ node_custom_roles_pre.split() }}" loop_control: loop_var: role when: node_custom_roles_pre != "" - - ansible.builtin.include_role: + - name: Include node role + ansible.builtin.include_role: name: node - - ansible.builtin.include_role: + - name: Include providers role + ansible.builtin.include_role: name: providers - - ansible.builtin.include_role: + - name: Include containerd role + ansible.builtin.include_role: name: containerd - - ansible.builtin.include_role: + - name: Include kubernetes role + ansible.builtin.include_role: name: kubernetes - - ansible.builtin.include_role: + - name: Include load_additional_components role + ansible.builtin.include_role: name: load_additional_components when: load_additional_components | bool - - 
ansible.builtin.include_role: + - name: Include ecr_credential_provider role + ansible.builtin.include_role: name: ecr_credential_provider when: ecr_credential_provider | bool - - ansible.builtin.include_role: + - name: Include post-node custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ custom_role_names.split() + node_custom_roles_post.split() }}" loop_control: loop_var: role when: custom_role_names != "" or node_custom_roles_post != "" - - ansible.builtin.include_role: + - name: Include sysprep role + ansible.builtin.include_role: name: sysprep - - ansible.builtin.include_role: + - name: Include post-sysprep custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ node_custom_roles_post_sysprep.split() }}" loop_control: diff --git a/images/capi/ansible/python.yml b/images/capi/ansible/python.yml index 53b556ec1f..04a0cab54d 100644 --- a/images/capi/ansible/python.yml +++ b/images/capi/ansible/python.yml @@ -12,14 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- hosts: all +- name: Install Python + hosts: all # Gathering facts requires Python to be available, so it's a chicken and egg # problem since this playbook installs Python. 
gather_facts: false become: true tasks: - - ansible.builtin.include_role: + - name: Include python role + ansible.builtin.include_role: name: python environment: diff --git a/images/capi/ansible/roles/containerd/defaults/main.yml b/images/capi/ansible/roles/containerd/defaults/main.yml index 21e309a211..a470d15d2e 100644 --- a/images/capi/ansible/roles/containerd/defaults/main.yml +++ b/images/capi/ansible/roles/containerd/defaults/main.yml @@ -18,6 +18,6 @@ containerd_gvisor_version: latest containerd_baseurl: https://github.com/containerd/containerd/releases/download/v{{ containerd_version }} containerd_filename: "containerd-{{ containerd_version }}-{{ system }}-{{ arch }}.tar.gz" containerd_url: "{{ containerd_baseurl }}/{{ containerd_filename }}" -containerd_runc_url: "https://github.com/opencontainers/runc/releases/download/v{{ containerd_runc_version }}/runc.{{ arch }}" -containerd_runc_version: "1.2.3" -containerd_runc_checksum_url: "https://github.com/opencontainers/runc/releases/download/v{{ containerd_runc_version }}/runc.sha256sum" +containerd_runc_url: "https://github.com/opencontainers/runc/releases/download/v{{ runc_version }}/runc.{{ arch }}" +runc_version: "1.3.4" +containerd_runc_checksum_url: "https://github.com/opencontainers/runc/releases/download/v{{ runc_version }}/runc.sha256sum" diff --git a/images/capi/ansible/roles/containerd/tasks/main.yml b/images/capi/ansible/roles/containerd/tasks/main.yml index cd0368f0b8..b76ae006db 100644 --- a/images/capi/ansible/roles/containerd/tasks/main.yml +++ b/images/capi/ansible/roles/containerd/tasks/main.yml @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: debian.yml - when: ansible_os_family == "Debian" +- name: Import Debian containerd tasks + ansible.builtin.import_tasks: debian.yml + when: ansible_facts['os_family'] == "Debian" -- ansible.builtin.import_tasks: redhat.yml - when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux", "RedHat"] +- name: Import RedHat containerd tasks + ansible.builtin.import_tasks: redhat.yml + when: ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux", "RedHat"] -- ansible.builtin.import_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" +- name: Import Photon containerd tasks + ansible.builtin.import_tasks: photon.yml + when: ansible_facts['os_family'] == "VMware Photon OS" # TODO(vincepri): Use deb/rpm packages once available. # See https://github.com/containerd/containerd/issues/1508 for context. @@ -70,7 +73,7 @@ dest: "{{ containerd_prefix | default('/usr/local') }}" extra_opts: - --no-overwrite-dir - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Copy containerd.service to /etc/systemd/system ansible.builtin.copy: @@ -85,7 +88,7 @@ src: /tmp/runc dest: /usr/local/sbin/runc mode: "0755" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" # Install containerd Wasm shims specified in a comma-separated string. Known runtimes are 'lunatic', 'slight', 'spin', and 'wws'. 
- name: Unpack containerd-wasm-shims @@ -95,7 +98,7 @@ dest: "{{ sysusr_prefix }}/bin" extra_opts: - --no-overwrite-dir - when: ansible_os_family != "Flatcar" and (containerd_wasm_shims_runtimes | length > 0) + when: ansible_facts['os_family'] != "Flatcar" and (containerd_wasm_shims_runtimes | length > 0) loop: "{{ containerd_wasm_shims_runtimes | split(',') }}" - name: Unpack containerd for Flatcar to /opt/bin @@ -103,7 +106,7 @@ remote_src: true src: /tmp/{{ containerd_filename }} dest: "{{ containerd_prefix | default('/opt') }}" - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Copy runc to /opt/bin ansible.builtin.copy: @@ -111,7 +114,7 @@ src: /tmp/runc dest: /opt/bin/runc mode: "0755" - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" # Install containerd Wasm shims specified in a comma-separated string. Known runtimes are 'lunatic', 'slight', 'spin', and 'wws'. - name: Unpack containerd-wasm-shims for Flatcar to /opt/bin @@ -121,7 +124,7 @@ dest: "{{ sysusr_prefix }}/bin" extra_opts: - --no-overwrite-dir - when: ansible_os_family == "Flatcar" and (containerd_wasm_shims_runtimes | length > 0) + when: ansible_facts['os_family'] == "Flatcar" and (containerd_wasm_shims_runtimes | length > 0) loop: "{{ containerd_wasm_shims_runtimes | split(',') }}" - name: Create unit file directory @@ -135,7 +138,7 @@ dest: /etc/systemd/system/containerd.service.d/10-opt-bin-custom.conf src: etc/systemd/system/containerd-flatcar.conf mode: "0600" - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Create containerd memory pressure drop-in file ansible.builtin.template: @@ -154,7 +157,7 @@ dest: /etc/systemd/system/containerd.service.d/limit-nofile.conf src: etc/systemd/system/containerd.service.d/limit-nofile.conf mode: "0644" - when: ansible_os_family in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux"] + when: ansible_facts['os_family'] 
in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux"] - name: Create containerd http proxy conf file if needed ansible.builtin.template: @@ -196,6 +199,19 @@ enabled: true state: restarted +- name: Symlink cri-tools + ansible.builtin.file: + src: /usr/local/bin/{{ item }} + dest: /usr/bin/{{ item }} + mode: "0755" + state: link + force: true + loop: + - ctr + - crictl + - critest + when: ansible_facts['os_family'] != "Flatcar" + - name: Delete containerd tarball ansible.builtin.file: path: /tmp/{{ containerd_filename }} @@ -221,11 +237,12 @@ - name: Download runsc for gvisor ansible.builtin.get_url: dest: "{{ sysusr_prefix }}/bin/{{ item }}" - url: https://storage.googleapis.com/gvisor/releases/release/{{ containerd_gvisor_version }}/{{ ansible_architecture }}/{{ item }} + url: https://storage.googleapis.com/gvisor/releases/release/{{ containerd_gvisor_version }}/{{ ansible_facts['architecture'] }}/{{ item }} mode: "0755" owner: root group: root - checksum: sha512:https://storage.googleapis.com/gvisor/releases/release/{{ containerd_gvisor_version }}/{{ ansible_architecture }}/{{ item }}.sha512 + checksum: >- + sha512:https://storage.googleapis.com/gvisor/releases/release/{{ containerd_gvisor_version }}/{{ ansible_facts['architecture'] }}/{{ item }}.sha512 loop: - runsc - containerd-shim-runsc-v1 diff --git a/images/capi/ansible/roles/containerd/tasks/redhat.yml b/images/capi/ansible/roles/containerd/tasks/redhat.yml index 125d02ab78..d2c11c27b0 100644 --- a/images/capi/ansible/roles/containerd/tasks/redhat.yml +++ b/images/capi/ansible/roles/containerd/tasks/redhat.yml @@ -13,7 +13,7 @@ # limitations under the License. 
--- - name: Install libseccomp package - ansible.builtin.yum: + ansible.builtin.dnf: name: libseccomp state: present lock_timeout: 60 diff --git a/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml b/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml index 2625d6e586..4907a601d8 100644 --- a/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml +++ b/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml @@ -10,6 +10,10 @@ imports = ["/etc/containerd/conf.d/*.toml"] [plugins] {% if containerd_version is version('2.0.0', '>=') %} +{% if containerd_image_pull_progress_timeout | default('') | length > 0 %} + [plugins.'io.containerd.cri.v1.images'] + image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}" +{% endif %} [plugins.'io.containerd.cri.v1.images'.pinned_images] sandbox = "{{ pause_image }}" [plugins.'io.containerd.cri.v1.images'.registry] diff --git a/images/capi/ansible/roles/ecr_credential_provider/tasks/main.yaml b/images/capi/ansible/roles/ecr_credential_provider/tasks/main.yaml index fd643cd97a..173cb3d8e0 100644 --- a/images/capi/ansible/roles/ecr_credential_provider/tasks/main.yaml +++ b/images/capi/ansible/roles/ecr_credential_provider/tasks/main.yaml @@ -37,7 +37,7 @@ block: - name: Ensure kubelet config exists ansible.builtin.stat: - path: "{{ '/etc/default/kubelet' if ansible_os_family == 'Debian' else '/etc/sysconfig/kubelet' }}" + path: "{{ '/etc/default/kubelet' if ansible_facts['os_family'] == 'Debian' else '/etc/sysconfig/kubelet' }}" register: kubelet_config failed_when: not kubelet_config.stat.exists @@ -45,6 +45,6 @@ when: kubelet_config.stat.exists ansible.builtin.shell: | set -e -o pipefail - sed -Ei 's|^(KUBELET_EXTRA_ARGS.*)|\1 --image-credential-provider-config=/var/usr/ecr-credential-provider/ecr-credential-provider-config --image-credential-provider-bin-dir={{ ecr_credential_provider_install_dir }}|' {{ '/etc/default/kubelet' if 
ansible_os_family == 'Debian' else '/etc/sysconfig/kubelet' }} + sed -Ei 's|^(KUBELET_EXTRA_ARGS.*)|\1 --image-credential-provider-config=/var/usr/ecr-credential-provider/ecr-credential-provider-config --image-credential-provider-bin-dir={{ ecr_credential_provider_install_dir }}|' {{ '/etc/default/kubelet' if ansible_facts['os_family'] == 'Debian' else '/etc/sysconfig/kubelet' }} args: executable: /bin/bash diff --git a/images/capi/ansible/roles/firstboot/meta/main.yml b/images/capi/ansible/roles/firstboot/meta/main.yml index aad01df663..004bdd1dc4 100644 --- a/images/capi/ansible/roles/firstboot/meta/main.yml +++ b/images/capi/ansible/roles/firstboot/meta/main.yml @@ -17,7 +17,7 @@ dependencies: vars: rpms: "" debs: "" - when: ansible_os_family == "VMware Photon OS" + when: ansible_facts['os_family'] == "VMware Photon OS" - role: setup vars: diff --git a/images/capi/ansible/roles/firstboot/tasks/main.yaml b/images/capi/ansible/roles/firstboot/tasks/main.yaml index 036f44ae74..9cc7900fbe 100644 --- a/images/capi/ansible/roles/firstboot/tasks/main.yaml +++ b/images/capi/ansible/roles/firstboot/tasks/main.yaml @@ -13,8 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -- ansible.builtin.include_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" +- name: Include Photon firstboot tasks + ansible.builtin.include_tasks: photon.yml + when: ansible_facts['os_family'] == "VMware Photon OS" -- ansible.builtin.include_tasks: qemu.yml +- name: Include QEMU firstboot tasks + ansible.builtin.include_tasks: qemu.yml when: packer_builder_type is search('qemu') diff --git a/images/capi/ansible/roles/firstboot/tasks/qemu.yml b/images/capi/ansible/roles/firstboot/tasks/qemu.yml index 0a166ec03e..93dce9472c 100644 --- a/images/capi/ansible/roles/firstboot/tasks/qemu.yml +++ b/images/capi/ansible/roles/firstboot/tasks/qemu.yml @@ -15,4 +15,5 @@ # no-op task just to have something for the role to do. 
Right now # all the work happens in the setup role -- ansible.builtin.meta: noop +- name: No-op for QEMU firstboot + ansible.builtin.meta: noop diff --git a/images/capi/ansible/roles/gpu/defaults/main.yml b/images/capi/ansible/roles/gpu/defaults/main.yml index ad75c953e9..3cba115a94 100644 --- a/images/capi/ansible/roles/gpu/defaults/main.yml +++ b/images/capi/ansible/roles/gpu/defaults/main.yml @@ -16,5 +16,5 @@ gpu_amd_usecase: dkms gpu_block_nouveau_loading: false gpu_systemd_networkd_update_initramfs: >- - {%- if ansible_os_family == 'VMware Photon OS' -%} dracut -f{%- elif ansible_os_family == 'Debian' -%} update-initramfs -u{%- endif -%} + {%- if ansible_facts['os_family'] == 'VMware Photon OS' -%} dracut -f{%- elif ansible_facts['os_family'] == 'Debian' -%} update-initramfs -u{%- endif -%} gpu_nvidia_ceph: false diff --git a/images/capi/ansible/roles/gpu/tasks/amd.yml b/images/capi/ansible/roles/gpu/tasks/amd.yml index eb5ef1d52a..43b21d6185 100644 --- a/images/capi/ansible/roles/gpu/tasks/amd.yml +++ b/images/capi/ansible/roles/gpu/tasks/amd.yml @@ -18,12 +18,12 @@ name: root groups: render,video append: true - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install the .deb for AMDGPU-Install ansible.builtin.apt: deb: https://repo.radeon.com/amdgpu-install/{{ amd_version }}/ubuntu/jammy/amdgpu-install_{{ amd_deb_version }}_all.deb - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Perform a cache update ansible.builtin.apt: @@ -33,19 +33,19 @@ until: apt_lock_status is not failed retries: 5 delay: 10 - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install packages required for AMD driver installation become: true ansible.builtin.apt: pkg: - - linux-headers-{{ ansible_kernel }} - - linux-modules-extra-{{ ansible_kernel }} + - linux-headers-{{ ansible_facts['kernel'] }} + - linux-modules-extra-{{ ansible_facts['kernel'] }} - 
build-essential - dkms - rocminfo - clinfo - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Run AMDGPU_Install binary with use-cases ansible.builtin.command: diff --git a/images/capi/ansible/roles/gpu/tasks/nvidia.yml b/images/capi/ansible/roles/gpu/tasks/nvidia.yml index 95b5b1c38d..7629ca9182 100644 --- a/images/capi/ansible/roles/gpu/tasks/nvidia.yml +++ b/images/capi/ansible/roles/gpu/tasks/nvidia.yml @@ -16,7 +16,7 @@ - name: Add NVIDIA package signing key ansible.builtin.apt_key: url: https://nvidia.github.io/libnvidia-container/gpgkey - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Perform a cache update ansible.builtin.apt: @@ -26,7 +26,7 @@ until: apt_lock_status is not failed retries: 5 delay: 10 - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install packages for building NVIDIA driver kernel module and interacting with s3 endpoint become: true @@ -37,7 +37,7 @@ - dkms - python3-boto3 - python3-botocore - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Make /etc/nvidia/ClientConfigToken directory become: true @@ -119,4 +119,4 @@ pkg: - python3-boto3 - python3-botocore - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/kubernetes/defaults/main.yml b/images/capi/ansible/roles/kubernetes/defaults/main.yml index 8ae5473a0a..6cf40cf148 100644 --- a/images/capi/ansible/roles/kubernetes/defaults/main.yml +++ b/images/capi/ansible/roles/kubernetes/defaults/main.yml @@ -43,6 +43,6 @@ kubernetes_cni_http_checksum: sha256:{{ kubernetes_cni_http_source }}/{{ kuberne kubeadm_template: etc/kubeadm.yml -kubelet_extra_args: --pod-infra-container-image={{ pause_image }} +kubelet_extra_args: "" kubernetes_enable_automatic_resource_sizing: false diff --git a/images/capi/ansible/roles/kubernetes/tasks/azurelinux.yml 
b/images/capi/ansible/roles/kubernetes/tasks/azurelinux.yml index 8b76bd5f3e..9c524685f6 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/azurelinux.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/azurelinux.yml @@ -21,7 +21,7 @@ gpgkey: "{{ kubernetes_rpm_gpg_key }}" - name: Install Kubernetes - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" allow_downgrade: true state: present diff --git a/images/capi/ansible/roles/kubernetes/tasks/crictl-url.yml b/images/capi/ansible/roles/kubernetes/tasks/crictl-url.yml index a7553da27b..58e1066f62 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/crictl-url.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/crictl-url.yml @@ -19,7 +19,7 @@ dest: /tmp/{{ crictl_filename }} mode: "0600" -- name: Create "{{ sysusrlocal_prefix }}/bin" directory +- name: Create crictl bin directory ansible.builtin.file: state: directory path: "{{ sysusrlocal_prefix }}/bin" diff --git a/images/capi/ansible/roles/kubernetes/tasks/debian.yml b/images/capi/ansible/roles/kubernetes/tasks/debian.yml index 03e11d562e..9a2089466c 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/debian.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/debian.yml @@ -33,4 +33,4 @@ - kubelet={{ kubernetes_deb_version }} - kubeadm={{ kubernetes_deb_version }} - kubectl={{ kubernetes_deb_version }} - - kubernetes-cni{{ '='+kubernetes_cni_deb_version if kubernetes_cni_deb_version else '' }} + - kubernetes-cni{{ '=' + kubernetes_cni_deb_version if kubernetes_cni_deb_version else '' }} diff --git a/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml b/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml index 6c601ba7f4..7abc21a7fe 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml @@ -7,7 +7,7 @@ mode: "0600" - name: Get images list - ansible.builtin.shell: kubeadm config images list --config /etc/kubeadm.yml + ansible.builtin.command: kubeadm 
config images list --config /etc/kubeadm.yml register: images_list - name: Log into ECR @@ -27,4 +27,4 @@ ansible.builtin.file: path: /etc/kubeadm.yml state: absent - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml b/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml index 2986de9478..b680b05a05 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml @@ -6,10 +6,10 @@ mode: "0600" - name: Kubeadm pull images - ansible.builtin.shell: kubeadm config images pull --config /etc/kubeadm.yml --cri-socket {{ containerd_cri_socket }} + ansible.builtin.command: kubeadm config images pull --config /etc/kubeadm.yml --cri-socket {{ containerd_cri_socket }} - name: Delete kubeadm config ansible.builtin.file: path: /etc/kubeadm.yml state: absent - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/kubernetes/tasks/main.yml b/images/capi/ansible/roles/kubernetes/tasks/main.yml index 4bfe1e9111..c2e563d0fe 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/main.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/main.yml @@ -12,42 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: debian.yml - when: kubernetes_source_type == "pkg" and ansible_os_family == "Debian" +- name: Import Debian Kubernetes tasks + ansible.builtin.import_tasks: debian.yml + when: kubernetes_source_type == "pkg" and ansible_facts['os_family'] == "Debian" -- ansible.builtin.import_tasks: azurelinux.yml - when: kubernetes_source_type == "pkg" and ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] +- name: Import Azure Linux Kubernetes tasks + ansible.builtin.import_tasks: azurelinux.yml + when: kubernetes_source_type == "pkg" and ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] -- ansible.builtin.import_tasks: redhat.yml - when: kubernetes_source_type == "pkg" and ansible_os_family == "RedHat" +- name: Import RedHat Kubernetes tasks + ansible.builtin.import_tasks: redhat.yml + when: kubernetes_source_type == "pkg" and ansible_facts['os_family'] == "RedHat" -- ansible.builtin.import_tasks: photon.yml - when: kubernetes_source_type == "pkg" and ansible_os_family == "VMware Photon OS" +- name: Import Photon Kubernetes tasks + ansible.builtin.import_tasks: photon.yml + when: kubernetes_source_type == "pkg" and ansible_facts['os_family'] == "VMware Photon OS" -- ansible.builtin.import_tasks: url.yml +- name: Import URL Kubernetes tasks + ansible.builtin.import_tasks: url.yml when: kubernetes_source_type == "http" and kubernetes_cni_source_type == "http" # must include crictl-url.yml after installing containerd, # as the cri-containerd tarball also includes crictl. 
-- ansible.builtin.import_tasks: crictl-url.yml - -- name: Symlink cri-tools - ansible.builtin.file: - src: /usr/local/bin/{{ item }} - dest: /usr/bin/{{ item }} - mode: "0755" - state: link - force: true - loop: - - ctr - - crictl - - critest - when: ansible_os_family != "Flatcar" +- name: Import crictl URL tasks + ansible.builtin.import_tasks: crictl-url.yml - name: Create kubelet default config file ansible.builtin.template: src: etc/sysconfig/kubelet - dest: "{{ '/etc/default/kubelet' if ansible_os_family == 'Debian' else '/etc/sysconfig/kubelet' }}" + dest: "{{ '/etc/default/kubelet' if ansible_facts['os_family'] == 'Debian' else '/etc/sysconfig/kubelet' }}" owner: root group: root mode: "0644" @@ -95,19 +88,19 @@ ansible.builtin.shell: cmd: "{{ sysusr_prefix }}/bin/kubectl completion bash > {{ sysusr_prefix }}/share/bash-completion/completions/kubectl" creates: "{{ sysusr_prefix }}/share/bash-completion/completions/kubectl" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Generate kubeadm bash completion ansible.builtin.shell: cmd: "{{ sysusr_prefix }}/bin/kubeadm completion bash > {{ sysusr_prefix }}/share/bash-completion/completions/kubeadm" creates: "{{ sysusr_prefix }}/share/bash-completion/completions/kubeadm" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Generate crictl bash completion ansible.builtin.shell: cmd: "{{ sysusr_prefix }}/bin/crictl completion bash > {{ sysusr_prefix }}/share/bash-completion/completions/crictl" creates: "{{ sysusr_prefix }}/share/bash-completion/completions/crictl" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Set KUBECONFIG variable and alias ansible.builtin.copy: @@ -122,8 +115,10 @@ ansible.builtin.set_fact: ecr: '{{ kubernetes_container_registry is regex("^[0-9]{12}.dkr.ecr.[^.]+.amazonaws.com$") }}' -- ansible.builtin.import_tasks: kubeadmpull.yml - when: (kubernetes_source_type 
== "pkg" and not ecr) or ansible_os_family == "Flatcar" +- name: Import kubeadm pull tasks + ansible.builtin.import_tasks: kubeadmpull.yml + when: (kubernetes_source_type == "pkg" and not ecr) or ansible_facts['os_family'] == "Flatcar" -- ansible.builtin.import_tasks: ecrpull.yml +- name: Import ECR pull tasks + ansible.builtin.import_tasks: ecrpull.yml when: kubernetes_source_type != "http" and ecr diff --git a/images/capi/ansible/roles/kubernetes/tasks/photon.yml b/images/capi/ansible/roles/kubernetes/tasks/photon.yml index 1fc3f2f956..81bc532a9f 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/photon.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/photon.yml @@ -25,4 +25,4 @@ kubelet-{{ kubernetes_rpm_version }} kubeadm-{{ kubernetes_rpm_version }} kubectl-{{ kubernetes_rpm_version }} - kubernetes-cni{{ '-'+kubernetes_cni_rpm_version if kubernetes_cni_rpm_version else '' }} + kubernetes-cni{{ '-' + kubernetes_cni_rpm_version if kubernetes_cni_rpm_version else '' }} diff --git a/images/capi/ansible/roles/kubernetes/tasks/redhat.yml b/images/capi/ansible/roles/kubernetes/tasks/redhat.yml index 829ec8064f..1264687951 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/redhat.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/redhat.yml @@ -21,7 +21,7 @@ gpgkey: "{{ kubernetes_rpm_gpg_key }}" - name: Install Kubernetes - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" allow_downgrade: true state: present @@ -33,4 +33,4 @@ - kubelet-{{ kubernetes_rpm_version }} - kubeadm-{{ kubernetes_rpm_version }} - kubectl-{{ kubernetes_rpm_version }} - - kubernetes-cni{{ '-'+kubernetes_cni_rpm_version if kubernetes_cni_rpm_version else '' }} + - kubernetes-cni{{ '-' + kubernetes_cni_rpm_version if kubernetes_cni_rpm_version else '' }} diff --git a/images/capi/ansible/roles/kubernetes/tasks/url.yml b/images/capi/ansible/roles/kubernetes/tasks/url.yml index afcd5f0a19..0e8185f3d9 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/url.yml 
+++ b/images/capi/ansible/roles/kubernetes/tasks/url.yml @@ -43,10 +43,7 @@ - name: Download Kubernetes binaries ansible.builtin.get_url: url: "{{ kubernetes_http_source }}/{{ kubernetes_semver }}/bin/linux/{{ kubernetes_resolved_arch }}/{{ item }}" - # TODO(akutz) Write a script to separately download the checksum - # and verify the associated file using the correct - # checksum file format - # checksum: "sha1:{{ kubernetes_http_source }}/bin/linux/amd64/{{ item }}.sha1" + checksum: "sha256:{{ kubernetes_http_source }}/{{ kubernetes_semver }}/bin/linux/{{ kubernetes_resolved_arch }}/{{ item }}.sha256" dest: "{{ sysusr_prefix }}/bin/{{ item }}" mode: "0755" owner: root @@ -56,10 +53,7 @@ - name: Download Kubernetes images ansible.builtin.get_url: url: "{{ kubernetes_http_source }}/{{ kubernetes_semver }}/bin/linux/{{ kubernetes_resolved_arch }}/{{ item }}" - # TODO(akutz) Write a script to separately download the checksum - # and verify the associated file using the correct - # checksum file format - # checksum: "sha1:{{ kubernetes_http_source }}/bin/linux/amd64/{{ item }}.sha1" + checksum: "sha256:{{ kubernetes_http_source }}/{{ kubernetes_semver }}/bin/linux/{{ kubernetes_resolved_arch }}/{{ item }}.sha256" dest: /tmp/{{ item }} mode: "0600" loop: "{{ kubernetes_imgs }}" @@ -74,7 +68,7 @@ - name: Modify Kubernetes images # Strip the arch from the name of the image to prevent image from being pulled again by kubeadm - ansible.builtin.shell: /tmp/modify-k8s-img.sh {{ item }} + ansible.builtin.command: /tmp/modify-k8s-img.sh {{ item }} loop: "{{ kubernetes_imgs }}" - name: Remove Kubernetes image modification script @@ -83,7 +77,9 @@ path: /tmp/modify-k8s-img.sh - name: Load Kubernetes images - ansible.builtin.shell: CONTAINERD_NAMESPACE="k8s.io" {{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images import /tmp/{{ item }} + ansible.builtin.command: "{{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images import /tmp/{{ item }}" + 
environment: + CONTAINERD_NAMESPACE: k8s.io loop: "{{ kubernetes_imgs }}" - name: Remove Kubernetes images diff --git a/images/capi/ansible/roles/load_additional_components/tasks/main.yml b/images/capi/ansible/roles/load_additional_components/tasks/main.yml index 1298a4d305..9a140c8056 100644 --- a/images/capi/ansible/roles/load_additional_components/tasks/main.yml +++ b/images/capi/ansible/roles/load_additional_components/tasks/main.yml @@ -12,16 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: executables.yml +- name: Import additional executables tasks + ansible.builtin.import_tasks: executables.yml when: additional_executables | bool -- ansible.builtin.import_tasks: registry.yml +- name: Import additional registry images tasks + ansible.builtin.import_tasks: registry.yml when: additional_registry_images | bool -- ansible.builtin.import_tasks: url.yml +- name: Import additional URL images tasks + ansible.builtin.import_tasks: url.yml when: additional_url_images | bool # We have to use include_tasks for the S3 task due to ansible pre-processing the task when import_tasks is used. # This causes a failure when using any other additional_component. -- ansible.builtin.include_tasks: s3.yml +- name: Include additional S3 tasks + ansible.builtin.include_tasks: s3.yml when: additional_s3 | bool diff --git a/images/capi/ansible/roles/load_additional_components/tasks/registry.yml b/images/capi/ansible/roles/load_additional_components/tasks/registry.yml index 10651158e0..8f7e675e19 100644 --- a/images/capi/ansible/roles/load_additional_components/tasks/registry.yml +++ b/images/capi/ansible/roles/load_additional_components/tasks/registry.yml @@ -13,7 +13,9 @@ # limitations under the License. 
--- - name: Pull additional images from registry - ansible.builtin.shell: CONTAINERD_NAMESPACE="k8s.io" {{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images pull {{ item }} + ansible.builtin.command: "{{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images pull {{ item }}" + environment: + CONTAINERD_NAMESPACE: k8s.io loop: "{{ additional_registry_images_list.split(',') }}" retries: 5 delay: 3 diff --git a/images/capi/ansible/roles/load_additional_components/tasks/s3.yml b/images/capi/ansible/roles/load_additional_components/tasks/s3.yml index b2f469efb7..401c3d7d3f 100644 --- a/images/capi/ansible/roles/load_additional_components/tasks/s3.yml +++ b/images/capi/ansible/roles/load_additional_components/tasks/s3.yml @@ -18,7 +18,7 @@ pkg: - python3-boto3 - python3-botocore - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" # TODO: We should probably think of an approach to allow a loop here to prevent multiple calls # and as such, multiple installs/removals of the boto packages. 
@@ -43,4 +43,4 @@ pkg: - python3-boto3 - python3-botocore - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/load_additional_components/tasks/url.yml b/images/capi/ansible/roles/load_additional_components/tasks/url.yml index 12ae550189..82f8473237 100644 --- a/images/capi/ansible/roles/load_additional_components/tasks/url.yml +++ b/images/capi/ansible/roles/load_additional_components/tasks/url.yml @@ -29,7 +29,9 @@ delay: 3 - name: Load additional images - ansible.builtin.shell: CONTAINERD_NAMESPACE="k8s.io" {{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images import --no-unpack {{ item.dest }} + ansible.builtin.command: "{{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images import --no-unpack {{ item.dest }}" + environment: + CONTAINERD_NAMESPACE: k8s.io loop: "{{ images.results }}" - name: Remove downloaded files diff --git a/images/capi/ansible/roles/node/defaults/main.yml b/images/capi/ansible/roles/node/defaults/main.yml index db9d99f458..789af2027e 100644 --- a/images/capi/ansible/roles/node/defaults/main.yml +++ b/images/capi/ansible/roles/node/defaults/main.yml @@ -45,7 +45,7 @@ azurelinux_rpms: - yum-utils - lsof -# Used for RedHat based distributions =! 7 (ex. RHEL-8, RockyLinux-8, RockyLinux-9 etc.) +# Used for RedHat based distributions != 7 (ex. RockyLinux-9 etc.) rh8_rpms: - curl - yum-utils @@ -119,15 +119,19 @@ common_raw_photon_rpms: [] # photon and flatcar do not have backward compatibility for legacy distro behavior for sysctl.conf by default # as it uses systemd-sysctl. set this var so we can use for sysctl conf file value. 
sysctl_conf_file: >- - {{ '/etc/sysctl.d/99-sysctl.conf' if ansible_os_family in ['Common Base Linux Mariner', 'Flatcar', 'Microsoft Azure Linux', 'VMware Photon OS'] + {{ '/etc/sysctl.d/99-sysctl.conf' if ansible_facts['os_family'] in ['Common Base Linux Mariner', 'Flatcar', 'Microsoft Azure Linux', 'VMware Photon OS'] else '/etc/sysctl.conf' }} -pause_image: registry.k8s.io/pause:3.10 +pause_image: registry.k8s.io/pause:3.10.2 containerd_additional_settings: leak_local_mdns_to_dns: false build_target: virt cloud_cfg_file: /etc/cloud/cloud.cfg -external_binary_path: "{{ '/opt/bin' if ansible_os_family == 'Flatcar' else '/usr/local/bin' }}" +external_binary_path: "{{ '/opt/bin' if ansible_facts['os_family'] == 'Flatcar' else '/usr/local/bin' }}" # Enable containerd trace audit in auditd, default: false. enable_containerd_audit: false + +# Disable flatcar usb devices, default: true +# See hardening guide: https://www.flatcar.org/docs/latest/setup/security/hardening-guide/#disable-usb +disable_flatcar_usb: true diff --git a/images/capi/ansible/roles/node/files/usr/local/bin/etcd-network-tuning.sh b/images/capi/ansible/roles/node/files/usr/local/bin/etcd-network-tuning.sh index 03713816a9..365af7b16d 100755 --- a/images/capi/ansible/roles/node/files/usr/local/bin/etcd-network-tuning.sh +++ b/images/capi/ansible/roles/node/files/usr/local/bin/etcd-network-tuning.sh @@ -18,6 +18,8 @@ set -o errexit # exits immediately on any unexpected error (does not bypass tra set -o nounset # will error if variables are used without first being defined set -o pipefail # any non-zero exit code in a piped command causes the pipeline to fail with that code +export PATH="${PATH}:/usr/sbin" + trap on_exit ERR on_exit() { echo "Error setting etcd network tuning parameters for interface: ${DEV}" | systemd-cat -p emerg -t etcd-tuning diff --git a/images/capi/ansible/roles/node/meta/main.yml b/images/capi/ansible/roles/node/meta/main.yml index b798165d1c..573e70fb39 100644 --- 
a/images/capi/ansible/roles/node/meta/main.yml +++ b/images/capi/ansible/roles/node/meta/main.yml @@ -17,26 +17,26 @@ dependencies: vars: rpms: "{{ common_rpms + al2_rpms + lookup('vars', 'common_' + build_target + '_rpms') }}" debs: "{{ common_debs }}" - when: ansible_distribution == "Amazon" and ansible_distribution_version == "2" + when: ansible_facts['distribution'] == "Amazon" and ansible_facts['distribution_version'] == "2" - role: setup vars: rpms: "{{ common_rpms + al2023_rpms + lookup('vars', 'common_' + build_target + '_rpms') }}" debs: "{{ common_debs }}" - when: ansible_distribution == "Amazon" and ansible_distribution_version == "2023" + when: ansible_facts['distribution'] == "Amazon" and ansible_facts['distribution_version'] == "2023" - role: setup vars: rpms: "{{ common_rpms }}" debs: "{{ common_debs }}" - when: packer_builder_type == "oracle-oci" and ansible_architecture == "aarch64" + when: packer_builder_type == "oracle-oci" and ansible_facts['architecture'] == "aarch64" - role: setup vars: rpms: >- - {{ (common_photon_rpms + lookup('vars', 'photon_' + ansible_distribution_major_version + '_rpms' ) + {{ (common_photon_rpms + lookup('vars', 'photon_' + ansible_facts['distribution_major_version'] + '_rpms' ) + lookup('vars', 'common_' + build_target + '_photon_rpms')) }} - when: ansible_distribution == "VMware Photon OS" + when: ansible_facts['distribution'] == "VMware Photon OS" - role: setup vars: @@ -44,11 +44,11 @@ dependencies: {{ ( common_rpms + rh8_rpms + lookup('vars', 'common_' + build_target + '_rpms') ) }} debs: "{{ common_debs + lookup('vars', 'common_' + build_target + '_debs') }}" when: > - ansible_distribution not in ["VMware Photon OS", "Amazon"] - and not (packer_builder_type == "oracle-oci" and ansible_architecture == "aarch64") + ansible_facts['distribution'] not in ["VMware Photon OS", "Amazon"] + and not (packer_builder_type == "oracle-oci" and ansible_facts['architecture'] == "aarch64") and not packer_builder_type is 
search('qemu') - role: setup vars: rpms: "{{ common_rpms + azurelinux_rpms + lookup('vars', 'common_' + build_target + '_rpms') }}" - when: ansible_distribution in ["Common Base Linux Mariner", "Microsoft Azure Linux"] + when: ansible_facts['distribution'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] diff --git a/images/capi/ansible/roles/node/tasks/flatcar.yml b/images/capi/ansible/roles/node/tasks/flatcar.yml new file mode 100644 index 0000000000..1bf73be586 --- /dev/null +++ b/images/capi/ansible/roles/node/tasks/flatcar.yml @@ -0,0 +1,40 @@ +# Copyright 2025 The Kubernetes Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +# See hardening guide: https://www.flatcar.org/docs/latest/setup/security/hardening-guide/#disable-usb +- name: Create /etc/modprobe.d directory + ansible.builtin.file: + path: /etc/modprobe.d + state: directory + owner: root + group: root + mode: '0755' + when: disable_flatcar_usb + +- name: Blacklist usb-storage module + ansible.builtin.copy: + dest: /etc/modprobe.d/blacklist.conf + content: | + blacklist usb-storage + owner: root + group: root + mode: '0644' + when: disable_flatcar_usb + +# sed is used here instead of the ansible.builtin.lineinfile module +# because of the read-only filesystem on Flatcar in /etc. 
+- name: Set default HOME_MODE in login.defs + ansible.builtin.shell: sed -ri "s/^#?HOME_MODE\>.*/HOME_MODE 0700/" /etc/login.defs + tags: + - skip_ansible_lint diff --git a/images/capi/ansible/roles/node/tasks/main.yml b/images/capi/ansible/roles/node/tasks/main.yml index c297eb750e..64e085f40e 100644 --- a/images/capi/ansible/roles/node/tasks/main.yml +++ b/images/capi/ansible/roles/node/tasks/main.yml @@ -12,20 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" +- name: Import Photon node tasks + ansible.builtin.import_tasks: photon.yml + when: ansible_facts['os_family'] == "VMware Photon OS" -- ansible.builtin.import_tasks: amazonLinux.yml - when: ansible_distribution == "Amazon" +- name: Import Amazon Linux node tasks + ansible.builtin.import_tasks: amazonLinux.yml + when: ansible_facts['distribution'] == "Amazon" # This is required until https://github.com/ansible/ansible/issues/77537 is fixed and used. 
- name: Override Flatcar's OS family ansible.builtin.set_fact: ansible_os_family: Flatcar - when: ansible_os_family == "Flatcar Container Linux by Kinvolk" + when: ansible_facts['os_family'] == "Flatcar Container Linux by Kinvolk" tags: - facts +- name: Import Flatcar node tasks + ansible.builtin.import_tasks: flatcar.yml + when: ansible_facts['os_family'] == "Flatcar" + - name: Ensure overlay module is present community.general.modprobe: name: overlay @@ -68,18 +74,18 @@ - name: Disable swap memory ansible.builtin.shell: | swapoff -a - when: ansible_memory_mb.swap.total != 0 + when: ansible_facts['memory_mb'].swap.total != 0 - name: Edit fstab file to disable swap ansible.builtin.shell: sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - when: ansible_memory_mb.swap.total != 0 + when: ansible_facts['memory_mb'].swap.total != 0 - name: Disable conntrackd service ansible.builtin.systemd: name: conntrackd state: stopped enabled: false - when: ansible_os_family not in ["Common Base Linux Mariner", "Debian", "Flatcar", "Microsoft Azure Linux"] + when: ansible_facts['os_family'] not in ["Common Base Linux Mariner", "Debian", "Flatcar", "Microsoft Azure Linux"] - name: Ensure auditd is running and comes on at reboot ansible.builtin.service: @@ -89,7 +95,7 @@ - name: Configure auditd rules for containerd ansible.builtin.copy: - src: "etc/audit/rules.d/containerd.rules{{ '-flatcar' if ansible_os_family == 'Flatcar' else '' }}" + src: "etc/audit/rules.d/containerd.rules{{ '-flatcar' if ansible_facts['os_family'] == 'Flatcar' else '' }}" dest: /etc/audit/rules.d/containerd.rules owner: root group: root @@ -103,7 +109,7 @@ state: present sysctl_set: true reload: true - when: ansible_distribution == "Ubuntu" + when: ansible_facts['distribution'] == "Ubuntu" - name: Set transparent huge pages to madvise ansible.builtin.lineinfile: @@ -111,7 +117,7 @@ backrefs: true regexp: ^(?!.*transparent_hugepage=madvise)(GRUB_CMDLINE_LINUX=.*)("$) line: \1 transparent_hugepage=madvise" - when: 
ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Copy udev etcd network tuning rules ansible.builtin.template: @@ -124,7 +130,3 @@ src: usr/local/bin/etcd-network-tuning.sh dest: "{{ external_binary_path }}/etcd-network-tuning.sh" mode: "0755" - -- name: Set default HOME_MODE in login.defs (Flatcar) - ansible.builtin.shell: sed -ri "s/^#?HOME_MODE\>.*/HOME_MODE 0700/" /etc/login.defs - when: ansible_os_family == "Flatcar" diff --git a/images/capi/ansible/roles/providers/files/maas/curtin/NOTICE.md b/images/capi/ansible/roles/providers/files/maas/curtin/NOTICE.md deleted file mode 100644 index 21e4b4f83f..0000000000 --- a/images/capi/ansible/roles/providers/files/maas/curtin/NOTICE.md +++ /dev/null @@ -1,3 +0,0 @@ -# Attribution -All the script in this folder is derived from the original work by Alexsander de Souza (Canonical), -available at https://github.com/canonical/packer-maas. \ No newline at end of file diff --git a/images/capi/ansible/roles/providers/files/maas/curtin/curtin-hooks b/images/capi/ansible/roles/providers/files/maas/curtin/curtin-hooks deleted file mode 100644 index 50d8385521..0000000000 --- a/images/capi/ansible/roles/providers/files/maas/curtin/curtin-hooks +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env python3 -# -# This original script was copied from: -# Source: https://github.com/canonical/packer-maas -# Original Author: Alexsander de Souza -# and modified by the image-builder team - - -import os -import platform -import random -import shutil -import socket -import string -import sys - -from curtin import distro, util -from curtin.commands import apt_config, curthooks -from curtin.config import load_command_config -from curtin.log import DEBUG, LOG, basicConfig -from curtin.paths import target_path -from curtin.util import ChrootableTarget, load_command_environment - - -def run_hook_in_target(target, hook): - """Look for "hook" in "target" and run in a chroot""" - target_hook = target_path(target, 
"/curtin/" + hook) - if os.path.isfile(target_hook): - LOG.debug("running %s" % target_hook) - with ChrootableTarget(target=target) as in_chroot: - in_chroot.subp(["/curtin/" + hook]) - return True - return False - - -def curthook(cfg, target, state): - """Configure network and bootloader""" - LOG.info("Running curtin builtin curthooks") - state_etcd = os.path.split(state["fstab"])[0] - machine = platform.machine() - - distro_info = distro.get_distroinfo(target=target) - if not distro_info: - raise RuntimeError("Failed to determine target distro") - osfamily = distro_info.family - LOG.info( - "Configuring target system for distro: %s osfamily: %s", - distro_info.variant, - osfamily, - ) - - sources = cfg.get("sources", {}) - dd_image = len(util.get_dd_images(sources)) > 0 - - curthooks.disable_overlayroot(cfg, target) - curthooks.disable_update_initramfs(cfg, target, machine) - curthooks.install_missing_packages(cfg, target, osfamily=osfamily) - - if not dd_image: - curthooks.configure_iscsi(cfg, state_etcd, target, osfamily=osfamily) - curthooks.configure_mdadm(cfg, state_etcd, target, osfamily=osfamily) - curthooks.copy_fstab(state.get("fstab"), target) - curthooks.add_swap(cfg, target, state.get("fstab")) - - run_hook_in_target(target, "install-custom-packages") - - if not dd_image: - curthooks.setup_kernel_img_conf(target) - - crypttab_location = os.path.join(os.path.split(state["fstab"])[0], "crypttab") - if os.path.exists(crypttab_location): - curthooks.copy_crypttab(crypttab_location, target) - - udev_rules_d = os.path.join(state["scratch"], "rules.d") - if os.path.isdir(udev_rules_d): - curthooks.copy_dname_rules(udev_rules_d, target) - - apt_config.apply_debconf_selections(cfg, target) - - curthooks.apply_networking(target, state) - curthooks.handle_pollinate_user_agent(cfg, target) - - # re-enable update_initramfs - curthooks.enable_update_initramfs(cfg, target, machine) - curthooks.update_initramfs(target, all_kernels=True) - - run_hook_in_target(target, 
"setup-bootloader") - generate_unique_iscsi_initiator_name(target) - - -def generate_random_id(length): - return ''.join(random.choices(string.hexdigits.lower(), k=length)) - - -def generate_unique_iscsi_initiator_name(target): - """Generate a unique iSCSI initiator name for the target.""" - base_iqn_name = "iqn.2004-10.com.ubuntu" - random_id = generate_random_id(4) - initiator_name = f"{base_iqn_name}:{random_id}:{socket.gethostname()}" - header = "## This file is automatically generated by curtin ##\n" - LOG.info("Generated unique iSCSI initiator name: %s", initiator_name) - - if not os.path.exists(target + "/etc/iscsi"): - LOG.warning("Target directory " + target + "/etc/iscsi does not exist." - "Skiping iSCSI initiator name generation.") - return - - if not os.path.exists(target + "/etc/iscsi/initiatorname.iscsi"): - LOG.warning("Target file " + target + "/etc/iscsi/initiatorname.iscsi does not exist." - "Skiping iSCSI initiator name generation.") - return - - # why 127 characters? 
https://kb.netapp.com/on-prem/ontap/da/SAN/SAN-KBs/What_is_the_maximum_length_of_a_iSCSI_iqn_name - if len(initiator_name) > 127: - LOG.error("iSCSI initiator name exceeds 127 characters: " + initiator_name) - raise ValueError("iSCSI initiator name exceeds 127 characters") - - try: - with open(target + "/etc/iscsi/initiatorname.iscsi", "w") as f: - f.write(header + "InitiatorName=%s\n" % initiator_name) - LOG.info("Wrote initiator name to " + target + "/etc/iscsi/initiatorname.iscsi") - except IOError as e: - LOG.error("Failed to write iSCSI initiator name: %s", e) - raise RuntimeError("Failed to write iSCSI initiator name") from e - - -def cleanup(): - """Remove curtin-hooks so its as if we were never here.""" - curtin_dir = os.path.dirname(__file__) - shutil.rmtree(curtin_dir) - - -def main(): - state = load_command_environment() - config = load_command_config(None, state) - target = state["target"] - - basicConfig(stream=sys.stderr, verbosity=DEBUG) - - curthook(config, target, state) - cleanup() - - -if __name__ == "__main__": - main() diff --git a/images/capi/ansible/roles/providers/files/maas/curtin/install-custom-packages b/images/capi/ansible/roles/providers/files/maas/curtin/install-custom-packages deleted file mode 100644 index b8262897ce..0000000000 --- a/images/capi/ansible/roles/providers/files/maas/curtin/install-custom-packages +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -ex -# -exit 0 \ No newline at end of file diff --git a/images/capi/ansible/roles/providers/files/maas/curtin/setup-bootloader b/images/capi/ansible/roles/providers/files/maas/curtin/setup-bootloader deleted file mode 100644 index eb1882c70a..0000000000 --- a/images/capi/ansible/roles/providers/files/maas/curtin/setup-bootloader +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -ex -# -# This script was based on: -# Source: https://github.com/canonical/packer-maas -# Original Author: Alexsander de Souza -# and modified by the image-builder team - -export DEBIAN_FRONTEND=noninteractive - -# 
Clean up remnants from packer-maas vm install -rm /var/cache/debconf/config.dat -dpkg --configure -a - -# Update the package lists before attempting to install the kernel -apt-get update - -if [ -d /sys/firmware/efi ]; then - echo "EFI MODE!" - dpkg-reconfigure grub-efi-amd64 - update-grub - - grub-install \ - --target=x86_64-efi \ - --efi-directory=/boot/efi \ - --bootloader-id=ubuntu \ - --recheck - update-initramfs -uk all - efibootmgr -v -else - echo "BIOS MODE!" - apt-get remove -y --allow-change-held-packages --allow-remove-essential grub-efi-amd64 grub-efi-amd64-signed shim-signed - apt-get install -y grub-pc - dpkg-reconfigure grub-pc - update-grub - DEVICE=$(findmnt -no SOURCE "/") - BOOT_DISK=$(lsblk -no PKNAME "$DEVICE") - grub-install /dev/"$BOOT_DISK" - update-initramfs -uk all -fi diff --git a/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py b/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py index b0d311bc09..fab384a917 100644 --- a/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py +++ b/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py @@ -84,6 +84,15 @@ def _get_data(self): ) LOG.info("User-data before update:[\n%s]", self.userdata_raw) secret_userdata = "/etc/secret-userdata.txt" + # Check if secret-userdata.txt exists (written by boothook for MachineDeployment/ControlPlane nodes) + # For MachinePool (ASG), this file won't exist as userdata is passed directly via EC2 metadata + if not os.path.exists(secret_userdata): + LOG.info( + "Secret userdata file %s not found. Something might have failed or this is a MachinePool/ASG node." 
+ "Using original userdata from EC2 metadata.", + secret_userdata, + ) + return True # Get the boothook output, save it as user-data # TODO: work with upstream to put this somewhere more sensible like: # /var/lib/cloud/instances/{{v1.instance_id}}/ec2-kubernetes-userdata.txt @@ -116,8 +125,17 @@ def _get_data(self): class DataSourceEc2KubernetesLocal(DataSourceEc2Kubernetes): + # init-local runs before networking is available. The parent + # DataSourceEc2._get_data() crawls the IMDS, which requires network. + # Without it the TCP connection retries for ~232s before timing out. + # Return False so cloud-init moves quickly to the init-network phase + # where DataSourceEc2Kubernetes runs with full network access. def _get_data(self): - return super(DataSourceEc2KubernetesLocal, self).get_data() + LOG.debug( + "Skipping metadata crawl in init-local phase (no network). " + "DataSourceEc2Kubernetes will run in init-network phase." + ) + return False # Used to match classes to dependencies diff --git a/images/capi/ansible/roles/providers/tasks/aws.yml b/images/capi/ansible/roles/providers/tasks/aws.yml index 2acecf9cd1..fabe8bf813 100644 --- a/images/capi/ansible/roles/providers/tasks/aws.yml +++ b/images/capi/ansible/roles/providers/tasks/aws.yml @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.include_tasks: awscliv2.yml - when: ansible_distribution != "Amazon" +- name: Include AWS CLI v2 tasks + ansible.builtin.include_tasks: awscliv2.yml + when: ansible_facts['distribution'] != "Amazon" # Remove after https://github.com/aws/amazon-ssm-agent/issues/235 is fixed. 
- name: Install aws agents RPM on Redhat distributions @@ -24,33 +25,33 @@ with_items: - "{{ amazon_ssm_agent_rpm }}" when: - - ansible_os_family == "RedHat" - - ansible_distribution != "Amazon" + - ansible_facts['os_family'] == "RedHat" + - ansible_facts['distribution'] != "Amazon" - name: Ensure ssm agent is running RPM ansible.builtin.service: name: amazon-ssm-agent state: started enabled: true - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Install aws agents Ubuntu - ansible.builtin.shell: snap install amazon-ssm-agent --classic - when: ansible_distribution == "Ubuntu" + ansible.builtin.command: snap install amazon-ssm-agent --classic + when: ansible_facts['distribution'] == "Ubuntu" - name: Ensure ssm agent is running Ubuntu ansible.builtin.service: name: snap.amazon-ssm-agent.amazon-ssm-agent.service state: started enabled: true - when: ansible_distribution == "Ubuntu" + when: ansible_facts['distribution'] == "Ubuntu" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Create cloud-init custom data source list ansible.builtin.copy: @@ -59,7 +60,7 @@ owner: root group: root mode: "0644" - when: ansible_distribution == "Ubuntu" and ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution'] == "Ubuntu" and ansible_facts['distribution_version'] is version('22.04', '>=') - name: Create custom cloud-init data source ansible.builtin.copy: @@ -68,4 +69,4 @@ owner: root group: root mode: "0644" - when: ansible_distribution == "Ubuntu" and ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution'] == "Ubuntu" and ansible_facts['distribution_version'] is version('22.04', '>=') diff --git a/images/capi/ansible/roles/providers/tasks/awscliv2.yml 
b/images/capi/ansible/roles/providers/tasks/awscliv2.yml index 7386d8dcf4..4e3da2db3d 100644 --- a/images/capi/ansible/roles/providers/tasks/awscliv2.yml +++ b/images/capi/ansible/roles/providers/tasks/awscliv2.yml @@ -1,10 +1,10 @@ --- -- name: Upgrade pip to latest +- name: Upgrade pip to latest # noqa: package-latest ansible.builtin.pip: name: pip executable: pip3 state: latest - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Install aws clients via pip ansible.builtin.pip: @@ -13,15 +13,15 @@ vars: packages: - awscli - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Install AWS CLI prequisites - ansible.builtin.yum: + ansible.builtin.dnf: name: - gnupg - unzip state: present - when: ansible_distribution == "RedHat" + when: ansible_facts['distribution'] == "RedHat" - name: Install AWS CLI prerequisites ansible.builtin.apt: @@ -29,7 +29,7 @@ - gnupg - unzip state: present - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Import AWS public key ansible.builtin.shell: | @@ -65,39 +65,39 @@ EOF gpg --import aws-public-key rm aws-public-key - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Download AWS CLI v2 archive signature file ansible.builtin.get_url: - url: https://awscli.amazonaws.com/awscli-exe-linux-{{ ansible_architecture }}.zip.sig + url: https://awscli.amazonaws.com/awscli-exe-linux-{{ ansible_facts['architecture'] }}.zip.sig dest: /tmp/awscliv2.zip.sig mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Download AWS CLI v2 archive ansible.builtin.get_url: - url: https://awscli.amazonaws.com/awscli-exe-linux-{{ ansible_architecture }}.zip + url: https://awscli.amazonaws.com/awscli-exe-linux-{{ ansible_facts['architecture'] }}.zip dest: /tmp/awscliv2.zip mode: "0644" - when: ansible_os_family != "Flatcar" + when: 
ansible_facts['os_family'] != "Flatcar" - name: Verify AWS CLI v2 archive ansible.builtin.command: gpg --verify /tmp/awscliv2.zip.sig /tmp/awscliv2.zip - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Unzip AWS CLI v2 archive ansible.builtin.unarchive: src: /tmp/awscliv2.zip dest: /tmp remote_src: true - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Install AWS CLI v2 ansible.builtin.command: /tmp/aws/install --update -i /usr/local/aws-cli -b /usr/local/sbin - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Remove temporary files ansible.builtin.file: path: /tmp/aws* state: absent - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/providers/tasks/azure.yml b/images/capi/ansible/roles/providers/tasks/azure.yml index 2ba716b833..3aa146ea94 100644 --- a/images/capi/ansible/roles/providers/tasks/azure.yml +++ b/images/capi/ansible/roles/providers/tasks/azure.yml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: azurecli.yml +- name: Import Azure CLI tasks + ansible.builtin.import_tasks: azurecli.yml when: debug_tools | bool - name: Configure PTP @@ -22,7 +23,10 @@ line: refclock PHC /dev/ptp0 poll 3 dpoll -2 offset 0 mode: "0644" # Flatcar now includes this by default as of 3975.2.0 which causes this task to fail - when: ansible_os_family != "Flatcar" or (ansible_os_family == "Flatcar" and ansible_distribution_version is version('3975.2.0', '<')) + when: > + ansible_facts['os_family'] != "Flatcar" or + (ansible_facts['os_family'] == "Flatcar" and + ansible_facts['distribution_version'] is version('3975.2.0', '<')) - name: Ensure makestep parameter set as per Azure recommendation ansible.builtin.lineinfile: @@ -30,7 +34,10 @@ regexp: ^makestep line: makestep 1.0 -1 # Flatcar now includes this by default as of 3975.2.0 which causes this task to fail - when: ansible_os_family != "Flatcar" or (ansible_os_family == "Flatcar" and ansible_distribution_version is version('3975.2.0', '<')) + when: > + ansible_facts['os_family'] != "Flatcar" or + (ansible_facts['os_family'] == "Flatcar" and + ansible_facts['distribution_version'] is version('3975.2.0', '<')) - name: Install iptables persistence ansible.builtin.apt: @@ -40,7 +47,7 @@ vars: packages: - iptables-persistent - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Block traffic to 168.63.129.16 port 80 for cve-2021-27075 ansible.builtin.copy: @@ -49,13 +56,13 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Load iptable rules from file community.general.iptables_state: state: restored path: /etc/iptables/rules.v4 - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install netbase and nfs-common ansible.builtin.apt: @@ -66,7 +73,7 @@ packages: - netbase - nfs-common - when: ansible_os_family == "Debian" + when: 
ansible_facts['os_family'] == "Debian" ## refer to ../files/etc/cloud/cloud.cfg.d/15_azure-vnet.cfg ## for more context on below file addition @@ -77,7 +84,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Create the credential provider path ansible.builtin.file: diff --git a/images/capi/ansible/roles/providers/tasks/azurecli.yml b/images/capi/ansible/roles/providers/tasks/azurecli.yml index ce52812f4c..a83e682362 100644 --- a/images/capi/ansible/roles/providers/tasks/azurecli.yml +++ b/images/capi/ansible/roles/providers/tasks/azurecli.yml @@ -13,7 +13,7 @@ # limitations under the License. --- - name: Install Azure CLI - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" block: - name: Add Microsoft Package Repository Key ansible.builtin.apt_key: @@ -44,13 +44,13 @@ state: present - name: Install Azure CLI - when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] + when: ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] ansible.builtin.package: name: azure-cli state: present - name: Install Azure CLI - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" block: - name: Import the Microsoft repository key ansible.builtin.rpm_key: diff --git a/images/capi/ansible/roles/providers/tasks/cloudstack.yml b/images/capi/ansible/roles/providers/tasks/cloudstack.yml index 3a427ee500..b21d2fdd7f 100644 --- a/images/capi/ansible/roles/providers/tasks/cloudstack.yml +++ b/images/capi/ansible/roles/providers/tasks/cloudstack.yml @@ -26,8 +26,8 @@ mode: "0644" - name: Run dracut cmd to regenerate initramfs with all drivers - needed when converting to different hypervisor templates - ansible.builtin.shell: dracut --force --no-hostonly - when: ansible_os_family == "RedHat" + ansible.builtin.command: dracut --force --no-hostonly + when: ansible_facts['os_family'] == "RedHat" - 
name: Add draut cmd to regenerate initramfs with only necessary drivers on first boot ansible.builtin.lineinfile: @@ -36,4 +36,4 @@ line: |- bootcmd: - dracut --force - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" diff --git a/images/capi/ansible/roles/providers/tasks/googlecompute.yml b/images/capi/ansible/roles/providers/tasks/googlecompute.yml index c75f67b90e..7dc531cc1d 100644 --- a/images/capi/ansible/roles/providers/tasks/googlecompute.yml +++ b/images/capi/ansible/roles/providers/tasks/googlecompute.yml @@ -13,7 +13,7 @@ # limitations under the License. --- - name: Install gcloud SDK - when: ansible_os_family != "RedHat" + when: ansible_facts['os_family'] != "RedHat" block: - name: Download gcloud SDK ansible.builtin.get_url: @@ -21,7 +21,7 @@ dest: /tmp/install-gcloud.sh mode: "0700" - name: Execute install-gcloud.sh - ansible.builtin.shell: bash -o errexit -o pipefail /tmp/install-gcloud.sh --disable-prompts --install-dir=/ + ansible.builtin.command: bash -o errexit -o pipefail /tmp/install-gcloud.sh --disable-prompts --install-dir=/ - name: Remove install-gcloud.sh ansible.builtin.file: path: /tmp/install-gcloud.sh @@ -39,7 +39,7 @@ with_items: "{{ find.files }}" - name: Install gcloud SDK - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" block: - name: Add gcloud repository info ansible.builtin.shell: | @@ -53,23 +53,23 @@ gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg EOM - name: Install google-cloud-cli package - ansible.builtin.yum: + ansible.builtin.dnf: name: google-cloud-cli state: present - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped 
enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/hcloud.yml b/images/capi/ansible/roles/providers/tasks/hcloud.yml index a7f4dc027b..d8aa580439 100644 --- a/images/capi/ansible/roles/providers/tasks/hcloud.yml +++ b/images/capi/ansible/roles/providers/tasks/hcloud.yml @@ -23,7 +23,7 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-tools and tools packages ansible.builtin.apt: @@ -34,17 +34,17 @@ packages: - linux-cloud-tools-generic - linux-tools-generic - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Install CSI prerequisites on Ubuntu ansible.builtin.apt: @@ -57,10 +57,10 @@ - open-iscsi - lvm2 - xfsprogs - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install CSI prerequisites on RedHat - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: @@ -69,18 +69,18 @@ - nfs-utils - lvm2 - xfsprogs - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Enable iSCSI initiator daemon on Ubuntu or RedHat ansible.builtin.systemd: name: iscsid state: started enabled: true - when: ansible_os_family in ["Debian", "Redhat"] + when: ansible_facts['os_family'] in ["Debian", "RedHat"] - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git
a/images/capi/ansible/roles/providers/tasks/huaweicloud.yml b/images/capi/ansible/roles/providers/tasks/huaweicloud.yml index d7e390cce0..ee0a6d3c16 100644 --- a/images/capi/ansible/roles/providers/tasks/huaweicloud.yml +++ b/images/capi/ansible/roles/providers/tasks/huaweicloud.yml @@ -17,21 +17,21 @@ name: pip executable: pip3 state: latest - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Uninstall cloud-init pip package ansible.builtin.pip: name: "cloud-init" executable: pip3 state: absent - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Create dns default conf directory ansible.builtin.file: path: /etc/systemd/resolved.conf.d state: directory mode: "0755" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Add default dns ansible.builtin.copy: @@ -42,4 +42,4 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml b/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml index 7f67a0fa3c..1d0974e88a 100644 --- a/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml +++ b/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml @@ -4,12 +4,12 @@ state: directory mode: "0775" -- name: Copy curtin scripts to /curtin - ansible.builtin.copy: - src: "files/maas/curtin/{{ item }}" - dest: "/curtin/{{ item }}" - mode: "0750" - loop: - - curtin-hooks - - install-custom-packages - - setup-bootloader +# Additional instruction: +# If you need to keep a custom curtin script, create a custom role containing the curtin hooks. 
+# For more information on how to create and use custom roles, refer to the official documentation: +# https://image-builder.sigs.k8s.io/capi/capi#customization + +# iSCSI configuration note: +# If you need unique names for the iSCSI InitiatorName, add a KubeadmConfigTemplate and include the following command under spec.preKubeadmCommands: +# +# echo "InitiatorName=$(iscsi-iname -p iqn.2004-10.com.ubuntu:$(cat /etc/hostname))" > /etc/iscsi/initiatorname.iscsi diff --git a/images/capi/ansible/roles/providers/tasks/maas.yml b/images/capi/ansible/roles/providers/tasks/maas.yml index 6b74724792..0c1de5baaf 100644 --- a/images/capi/ansible/roles/providers/tasks/maas.yml +++ b/images/capi/ansible/roles/providers/tasks/maas.yml @@ -3,4 +3,4 @@ - name: Include MaaS Specific configs for Ubuntu Distro ansible.builtin.include_tasks: maas-ubuntu.yml - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/main.yml b/images/capi/ansible/roles/providers/tasks/main.yml index 681866b8e0..38b651856b 100644 --- a/images/capi/ansible/roles/providers/tasks/main.yml +++ b/images/capi/ansible/roles/providers/tasks/main.yml @@ -12,52 +12,68 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.include_tasks: aws.yml +- name: Include AWS provider tasks + ansible.builtin.include_tasks: aws.yml when: packer_builder_type.startswith('amazon') -- ansible.builtin.include_tasks: azure.yml +- name: Include Azure provider tasks + ansible.builtin.include_tasks: azure.yml when: packer_builder_type.startswith('azure') -- ansible.builtin.include_tasks: outscale.yml +- name: Include Outscale provider tasks + ansible.builtin.include_tasks: outscale.yml when: packer_builder_type.startswith('outscale') -- ansible.builtin.include_tasks: vmware.yml +- name: Include VMware provider tasks + ansible.builtin.include_tasks: vmware.yml when: packer_builder_type is search('vmware') or packer_builder_type is search('vsphere') -- ansible.builtin.include_tasks: googlecompute.yml +- name: Include Google Compute provider tasks + ansible.builtin.include_tasks: googlecompute.yml when: packer_builder_type.startswith('googlecompute') -- ansible.builtin.include_tasks: openstack.yml +- name: Include OpenStack provider tasks + ansible.builtin.include_tasks: openstack.yml when: packer_builder_type.startswith('openstack') -- ansible.builtin.include_tasks: oci.yml +- name: Include OCI provider tasks + ansible.builtin.include_tasks: oci.yml when: packer_builder_type.startswith('oracle-oci') -- ansible.builtin.include_tasks: proxmox.yml +- name: Include Proxmox provider tasks + ansible.builtin.include_tasks: proxmox.yml when: packer_builder_type.startswith('proxmox') -- ansible.builtin.include_tasks: qemu.yml +- name: Include QEMU provider tasks + ansible.builtin.include_tasks: qemu.yml when: packer_builder_type is search('qemu') and build_target is not search('raw') -- ansible.builtin.include_tasks: cloudstack.yml +- name: Include CloudStack provider tasks + ansible.builtin.include_tasks: cloudstack.yml when: packer_builder_type is search('qemu') and provider is defined and provider is search('cloudstack') -- ansible.builtin.include_tasks: raw.yml +- name: Include raw 
provider tasks + ansible.builtin.include_tasks: raw.yml when: packer_builder_type is search('qemu') and build_target is search('raw') -- ansible.builtin.include_tasks: nutanix.yml +- name: Include Nutanix provider tasks + ansible.builtin.include_tasks: nutanix.yml when: packer_builder_type is search('nutanix') -- ansible.builtin.include_tasks: hcloud.yml +- name: Include Hetzner Cloud provider tasks + ansible.builtin.include_tasks: hcloud.yml when: packer_builder_type is search('hcloud') -- ansible.builtin.include_tasks: huaweicloud.yml +- name: Include Huawei Cloud provider tasks + ansible.builtin.include_tasks: huaweicloud.yml when: packer_builder_type.startswith('huaweicloud') -- ansible.builtin.include_tasks: scaleway.yml +- name: Include Scaleway provider tasks + ansible.builtin.include_tasks: scaleway.yml when: packer_builder_type.startswith('scaleway') -- ansible.builtin.include_tasks: maas.yml +- name: Include MAAS provider tasks + ansible.builtin.include_tasks: maas.yml when: packer_builder_type is search('qemu') and provider is defined and provider is search('maas') # Create a boot order configuration @@ -68,7 +84,7 @@ path: /etc/systemd/system/cloud-final.service.d state: directory mode: "0755" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Create cloud-final boot order drop in file ansible.builtin.copy: @@ -77,14 +93,14 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Creates unit file directory for cloud-config ansible.builtin.file: path: /etc/systemd/system/cloud-config.service.d state: directory mode: "0755" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Create cloud-config boot order drop in file ansible.builtin.copy: @@ -93,7 +109,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" # Some OS 
might disable cloud-final service on boot (rhel 7). # Enable all cloud-init services on boot. @@ -106,7 +122,7 @@ - cloud-config - cloud-init - cloud-init-local - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Create cloud-init config file ansible.builtin.copy: @@ -115,7 +131,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" # `feature_overrides.py` only works on old cloud-init versions (removed in https://github.com/canonical/cloud-init/pull/4228)... - name: Set cloudinit feature flags @@ -125,7 +141,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Set cloudinit feature flags for redhat 8 ansible.builtin.copy: @@ -134,7 +150,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "RedHat" and ansible_distribution == "RedHat" and ansible_distribution_major_version == "8" + when: ansible_facts['os_family'] == "RedHat" and ansible_facts['distribution'] == "RedHat" and ansible_facts['distribution_major_version'] == "8" - name: Set cloudinit feature flags for redhat 9 ansible.builtin.copy: @@ -143,7 +159,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "RedHat" and ansible_distribution == "RedHat" and ansible_distribution_major_version == "9" + when: ansible_facts['os_family'] == "RedHat" and ansible_facts['distribution'] == "RedHat" and ansible_facts['distribution_major_version'] == "9" # ...and `features.py` must be patched instead - name: Patch cloud-init feature flags for Debian-based OS @@ -151,14 +167,14 @@ path: /usr/lib/python3/dist-packages/cloudinit/features.py marker: "# {mark} ANSIBLE MANAGED BLOCK (by image-builder)" block: "{{ lookup('file', 'cloud-init-features.patch') }}" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Patch cloud-init feature flags for RedHat 9 
ansible.builtin.blockinfile: path: /usr/lib/python3.9/site-packages/cloudinit/features.py marker: "# {mark} ANSIBLE MANAGED BLOCK (by image-builder)" block: "{{ lookup('file', 'cloud-init-features.patch') }}" - when: ansible_os_family == "RedHat" and ansible_distribution == "RedHat" and ansible_distribution_major_version == "9" + when: ansible_facts['os_family'] == "RedHat" and ansible_facts['distribution'] == "RedHat" and ansible_facts['distribution_major_version'] == "9" - name: Ensure chrony is running ansible.builtin.systemd: @@ -169,4 +185,4 @@ when: > (packer_builder_type.startswith('amazon') or packer_builder_type.startswith('azure') or packer_builder_type is search('vmware') or packer_builder_type is search('vsphere')) - and ansible_os_family != "Flatcar" + and ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/providers/tasks/nutanix-redhat.yml b/images/capi/ansible/roles/providers/tasks/nutanix-redhat.yml index 77337390f4..97d8b57efa 100644 --- a/images/capi/ansible/roles/providers/tasks/nutanix-redhat.yml +++ b/images/capi/ansible/roles/providers/tasks/nutanix-redhat.yml @@ -13,7 +13,7 @@ # limitations under the License. --- - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: @@ -22,7 +22,7 @@ - cloud-utils-growpart - name: Install CAPX prerequisites - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: diff --git a/images/capi/ansible/roles/providers/tasks/nutanix.yml b/images/capi/ansible/roles/providers/tasks/nutanix.yml index 2c96ff84eb..697af4b1dd 100644 --- a/images/capi/ansible/roles/providers/tasks/nutanix.yml +++ b/images/capi/ansible/roles/providers/tasks/nutanix.yml @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.include_tasks: nutanix-redhat.yml - when: ansible_os_family == "RedHat" +- name: Include Nutanix RedHat tasks + ansible.builtin.include_tasks: nutanix-redhat.yml + when: ansible_facts['os_family'] == "RedHat" -- ansible.builtin.include_tasks: nutanix-ubuntu.yml - when: ansible_os_family == "Debian" +- name: Include Nutanix Ubuntu tasks + ansible.builtin.include_tasks: nutanix-ubuntu.yml + when: ansible_facts['os_family'] == "Debian" - name: Ensure ip_vs module is loaded ansible.builtin.lineinfile: diff --git a/images/capi/ansible/roles/providers/tasks/oci.yml b/images/capi/ansible/roles/providers/tasks/oci.yml index 1bd7a24c7b..a038389935 100644 --- a/images/capi/ansible/roles/providers/tasks/oci.yml +++ b/images/capi/ansible/roles/providers/tasks/oci.yml @@ -17,18 +17,18 @@ path: /etc/iptables/rules.v4 state: absent regexp: -A INPUT -j REJECT --reject-with icmp-host-prohibited - when: ansible_distribution == "Ubuntu" + when: ansible_facts['distribution'] == "Ubuntu" - name: Remove the default input reject all iptable rule ansible.builtin.lineinfile: path: /etc/iptables/rules.v4 state: absent regexp: -A FORWARD -j REJECT --reject-with icmp-host-prohibited - when: ansible_distribution == "Ubuntu" + when: ansible_facts['distribution'] == "Ubuntu" - name: Disable firewalld service ansible.builtin.systemd: name: firewalld state: stopped enabled: false - when: ansible_distribution == "OracleLinux" + when: ansible_facts['distribution'] == "OracleLinux" diff --git a/images/capi/ansible/roles/providers/tasks/openstack.yml b/images/capi/ansible/roles/providers/tasks/openstack.yml index a5bfb809fe..48d1eb2abf 100644 --- a/images/capi/ansible/roles/providers/tasks/openstack.yml +++ b/images/capi/ansible/roles/providers/tasks/openstack.yml @@ -23,21 +23,35 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Disable Hyper-V KVP protocol 
daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" -- name: Install networkd-dispatcher service (Run networkd-dispatcher) +- name: Ensure networkd-dispatcher is installed + ansible.builtin.apt: + name: networkd-dispatcher + state: present + force_apt_get: true + when: ansible_facts['os_family'] == "Debian" + +- name: Enable networkd-dispatcher service ansible.builtin.systemd: name: networkd-dispatcher state: started enabled: true - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" + +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhcp + state: directory + mode: '0755' + when: ansible_facts['os_family'] == "Debian" - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: @@ -51,4 +65,4 @@ - { src: files/etc/networkd-dispatcher/routable.d/20-chrony.j2, dest: /etc/networkd-dispatcher/routable.d/20-chrony } - { src: files/etc/networkd-dispatcher/off.d/20-chrony.j2, dest: /etc/networkd-dispatcher/off.d/20-chrony } - { src: files/etc/networkd-dispatcher/no-carrier.d/20-chrony.j2, dest: /etc/networkd-dispatcher/no-carrier.d/20-chrony } - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/outscale.yml b/images/capi/ansible/roles/providers/tasks/outscale.yml index f63228eb36..086b7d6e90 100644 --- a/images/capi/ansible/roles/providers/tasks/outscale.yml +++ b/images/capi/ansible/roles/providers/tasks/outscale.yml @@ -8,5 +8,16 @@ packages: - cloud-init - cloud-guest-utils - - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf + +- name: Install Debian specific packages + ansible.builtin.apt: + name: cloud-initramfs-dyn-netconf + state: present + when: ansible_facts['distribution'] == 'Debian' + +- name: Install Ubuntu specific packages + 
ansible.builtin.apt: + name: cloud-initramfs-copymods + state: present + when: ansible_facts['distribution'] == 'Ubuntu' diff --git a/images/capi/ansible/roles/providers/tasks/proxmox.yml b/images/capi/ansible/roles/providers/tasks/proxmox.yml index be3e8f15f3..85759302ec 100644 --- a/images/capi/ansible/roles/providers/tasks/proxmox.yml +++ b/images/capi/ansible/roles/providers/tasks/proxmox.yml @@ -23,24 +23,31 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" + +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhcp + state: directory + mode: '0755' + when: ansible_facts['os_family'] == "Debian" - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: @@ -54,16 +61,16 @@ - { src: files/etc/networkd-dispatcher/routable.d/20-chrony.j2, dest: /etc/networkd-dispatcher/routable.d/20-chrony } - { src: files/etc/networkd-dispatcher/off.d/20-chrony.j2, dest: /etc/networkd-dispatcher/off.d/20-chrony } - { src: files/etc/networkd-dispatcher/no-carrier.d/20-chrony.j2, dest: /etc/networkd-dispatcher/no-carrier.d/20-chrony } - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Ensure networkd-dispatcher is started ansible.builtin.systemd: name: networkd-dispatcher state: started - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: 
Ensure networkd-dispatcher is enabled ansible.builtin.systemd: name: networkd-dispatcher enabled: true - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/qemu.yml b/images/capi/ansible/roles/providers/tasks/qemu.yml index 54c8404158..173d64a16a 100644 --- a/images/capi/ansible/roles/providers/tasks/qemu.yml +++ b/images/capi/ansible/roles/providers/tasks/qemu.yml @@ -23,24 +23,31 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" + +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhcp + state: directory + mode: '0755' + when: ansible_facts['os_family'] == "Debian" - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: @@ -54,4 +61,4 @@ - { src: files/etc/networkd-dispatcher/routable.d/20-chrony.j2, dest: /etc/networkd-dispatcher/routable.d/20-chrony } - { src: files/etc/networkd-dispatcher/off.d/20-chrony.j2, dest: /etc/networkd-dispatcher/off.d/20-chrony } - { src: files/etc/networkd-dispatcher/no-carrier.d/20-chrony.j2, dest: /etc/networkd-dispatcher/no-carrier.d/20-chrony } - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/raw.yml b/images/capi/ansible/roles/providers/tasks/raw.yml index 44cbf8f739..945370e134 100644 --- 
a/images/capi/ansible/roles/providers/tasks/raw.yml +++ b/images/capi/ansible/roles/providers/tasks/raw.yml @@ -23,21 +23,21 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Run dracut cmd to regenerate initramfs with all drivers - needed when converting to different hypervisor templates - ansible.builtin.shell: dracut --force --no-hostonly - when: ansible_os_family == "RedHat" + ansible.builtin.command: dracut --force --no-hostonly + when: ansible_facts['os_family'] == "RedHat" - name: Symlink /usr/libexec/cloud-init to /usr/lib/cloud-init ansible.builtin.file: @@ -45,11 +45,11 @@ dest: /usr/lib/cloud-init mode: "0777" state: link - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/scaleway.yml b/images/capi/ansible/roles/providers/tasks/scaleway.yml index 29cde0cc60..8ceb1e9c72 100644 --- a/images/capi/ansible/roles/providers/tasks/scaleway.yml +++ b/images/capi/ansible/roles/providers/tasks/scaleway.yml @@ -17,4 +17,4 @@ name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/vmware-photon.yml b/images/capi/ansible/roles/providers/tasks/vmware-photon.yml index fbffb314a1..57ba6871a7 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware-photon.yml +++ 
b/images/capi/ansible/roles/providers/tasks/vmware-photon.yml @@ -58,6 +58,12 @@ state: started enabled: true +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhclient + state: directory + mode: '0755' + - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: src: "{{ item.src }}" diff --git a/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml b/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml index bfb929a6ea..1757c6f5ca 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml @@ -14,7 +14,7 @@ --- - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: @@ -31,13 +31,13 @@ cloud_init_version: "{{ ansible_facts.packages['cloud-init'][0].version }}" - name: Install python2 pip - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: packages: - python2-pip - when: ansible_distribution_major_version|int <= 8 + when: ansible_facts['distribution_major_version']|int <= 8 # pip on CentOS needs to be upgraded, but since it's still # Python 2.7, need < 21.0 @@ -45,7 +45,7 @@ ansible.builtin.pip: name: pip<21.0 state: forcereinstall - when: ansible_distribution_major_version == '7' + when: ansible_facts['distribution_major_version'] == '7' # Directly installing Guestinfo datasource is needed so long as # cloud-init is < 21.3 @@ -59,7 +59,7 @@ mode: "0700" - name: Execute cloud-init-vmware.sh - ansible.builtin.shell: bash -o errexit -o pipefail /tmp/cloud-init-vmware.sh + ansible.builtin.command: bash -o errexit -o pipefail /tmp/cloud-init-vmware.sh environment: REPO_SLUG: "{{ guestinfo_datasource_slug }}" GIT_REF: "{{ guestinfo_datasource_ref }}" diff --git a/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml b/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml 
index 1863619259..b63187e202 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml @@ -42,6 +42,12 @@ state: started enabled: true +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhcp + state: directory + mode: '0755' + - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: src: "{{ item.src }}" @@ -49,7 +55,7 @@ mode: a+x vars: server_dir: /var/lib/dhcp - chrony_helper_dir: /usr/lib/chrony + chrony_helper_dir: "{{ '/usr/libexec/chrony' if ansible_facts['distribution_version'] is version('22.04', '>=') else '/usr/lib/chrony' }}" loop: - { src: files/etc/networkd-dispatcher/routable.d/20-chrony.j2, dest: /etc/networkd-dispatcher/routable.d/20-chrony } - { src: files/etc/networkd-dispatcher/off.d/20-chrony.j2, dest: /etc/networkd-dispatcher/off.d/20-chrony } @@ -62,4 +68,4 @@ content: | datasource: VMware mode: "0644" - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') diff --git a/images/capi/ansible/roles/providers/tasks/vmware.yml b/images/capi/ansible/roles/providers/tasks/vmware.yml index 940b6f9272..507b0ed55d 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware.yml @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.include_tasks: vmware-photon.yml - when: ansible_os_family == "VMware Photon OS" +- name: Include VMware Photon tasks + ansible.builtin.include_tasks: vmware-photon.yml + when: ansible_facts['os_family'] == "VMware Photon OS" -- ansible.builtin.include_tasks: vmware-ubuntu.yml - when: ansible_os_family == "Debian" +- name: Include VMware Ubuntu tasks + ansible.builtin.include_tasks: vmware-ubuntu.yml + when: ansible_facts['os_family'] == "Debian" -- ansible.builtin.include_tasks: vmware-redhat.yml - when: ansible_os_family == "RedHat" +- name: Include VMware RedHat tasks + ansible.builtin.include_tasks: vmware-redhat.yml + when: ansible_facts['os_family'] == "RedHat" - name: Create provider vmtools config drop-in file ansible.builtin.copy: @@ -28,7 +31,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Create service to modify cloud-init config ansible.builtin.copy: @@ -37,7 +40,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Copy cloud-init modification script ansible.builtin.copy: @@ -46,7 +49,7 @@ owner: root group: root mode: "0755" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Enable modify-cloud-init-cfg.service ansible.builtin.systemd: @@ -54,4 +57,4 @@ daemon_reload: true enabled: true state: stopped - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/python/tasks/main.yml b/images/capi/ansible/roles/python/tasks/main.yml index f106fe6aa5..b112b67de4 100644 --- a/images/capi/ansible/roles/python/tasks/main.yml +++ b/images/capi/ansible/roles/python/tasks/main.yml @@ -16,7 +16,8 @@ ansible.builtin.raw: grep DISTRIB_ID /etc/lsb-release || echo '/etc/lsb-release not found' register: distrib_id -- ansible.builtin.include_tasks: flatcar.yml - # We 
can't use ansible_os_family fact here for consistency, as facts gathering +- name: Include Flatcar Python tasks + ansible.builtin.include_tasks: flatcar.yml + # We can't use ansible_facts['os_family'] fact here for consistency, as facts gathering # is disabled in the playbook which includes this role. See playbook for more details. when: distrib_id.stdout_lines[0] is search("Flatcar") diff --git a/images/capi/ansible/roles/security/tasks/falco.yml b/images/capi/ansible/roles/security/tasks/falco.yml index ecb4aa8366..b3c129b6e2 100644 --- a/images/capi/ansible/roles/security/tasks/falco.yml +++ b/images/capi/ansible/roles/security/tasks/falco.yml @@ -15,7 +15,7 @@ --- - name: Install Falco on Debian based systems - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" block: - name: Add Falco package signing key ansible.builtin.apt_key: @@ -33,7 +33,7 @@ pkg: - dkms - make - - "linux-headers-{{ ansible_kernel }}" + - "linux-headers-{{ ansible_facts['kernel'] }}" - clang - llvm update_cache: true @@ -43,7 +43,7 @@ until: pkg_result is success - name: Install Falco on RedHat based systems - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" block: - name: Add Falco YUM repo ansible.builtin.yum_repository: @@ -60,7 +60,7 @@ pkg: - dkms - make - - "kernel-devel-{{ ansible_kernel }}" + - "kernel-devel-{{ ansible_facts['kernel'] }}" - clang - llvm - dialog @@ -73,11 +73,11 @@ ansible.builtin.package: name: falco state: present - when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "Debian" or ansible_facts['os_family'] == "RedHat" - name: Enable Falco Modern eBPF ansible.builtin.service: name: falco-modern-bpf state: started enabled: true - when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "Debian" or ansible_facts['os_family'] == "RedHat" diff --git a/images/capi/ansible/roles/security/tasks/trivy.yml 
b/images/capi/ansible/roles/security/tasks/trivy.yml index 58408ae716..28ae28037e 100644 --- a/images/capi/ansible/roles/security/tasks/trivy.yml +++ b/images/capi/ansible/roles/security/tasks/trivy.yml @@ -15,7 +15,7 @@ --- - name: Install Trivy on Debian based systems - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" block: - name: Add Trivy package signing key ansible.builtin.apt_key: @@ -24,19 +24,19 @@ - name: Add Trivy apt repo ansible.builtin.apt_repository: - repo: "deb https://aquasecurity.github.io/trivy-repo/deb {{ansible_distribution_release}} main" + repo: "deb https://aquasecurity.github.io/trivy-repo/deb {{ ansible_facts['distribution_release'] }} main" state: present filename: trivy - name: Install Trivy on RedHat based systems - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" block: - name: Add Trivy rpm repo ansible.builtin.yum_repository: name: Trivy repository description: Trivy YUM repo file: trivy - baseurl: https://aquasecurity.github.io/trivy-repo/rpm/releases/{{ ansible_distribution_release }}/{{ ansible_architecture }}/ + baseurl: https://aquasecurity.github.io/trivy-repo/rpm/releases/{{ ansible_facts['distribution_release'] }}/{{ ansible_facts['architecture'] }}/ gpgcheck: true enabled: true gpgkey: https://aquasecurity.github.io/trivy-repo/rpm/public.keyy @@ -46,10 +46,10 @@ name: trivy update_cache: true state: present - when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "Debian" or ansible_facts['os_family'] == "RedHat" - name: Update Trivy DB to ensure latest records are available as of now ansible.builtin.command: trivy rootfs --download-db-only args: creates: ~/.cache/trivy/db/trivy.db - when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "Debian" or ansible_facts['os_family'] == "RedHat" diff --git a/images/capi/ansible/roles/setup/defaults/main.yml 
b/images/capi/ansible/roles/setup/defaults/main.yml index 8432b7868d..9a7dab0f5f 100644 --- a/images/capi/ansible/roles/setup/defaults/main.yml +++ b/images/capi/ansible/roles/setup/defaults/main.yml @@ -19,8 +19,10 @@ redhat_epel_rpm: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noa epel_rpm_gpg_key: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 rpms: "" extra_rpms: "" +ubuntu_repo: "http://us.archive.ubuntu.com/ubuntu/" +ubuntu_security_repo: "http://security.ubuntu.com/ubuntu/" disable_public_repos: false -external_binary_path: "{{ '/opt/bin' if ansible_os_family == 'Flatcar' else '/usr/local/bin' }}" +external_binary_path: "{{ '/opt/bin' if ansible_facts['os_family'] == 'Flatcar' else '/usr/local/bin' }}" extra_repos: "" pip_conf_file: "" diff --git a/images/capi/ansible/roles/setup/tasks/azurelinux.yml b/images/capi/ansible/roles/setup/tasks/azurelinux.yml index 0ed1f5c7a4..7b956f1304 100644 --- a/images/capi/ansible/roles/setup/tasks/azurelinux.yml +++ b/images/capi/ansible/roles/setup/tasks/azurelinux.yml @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml -- name: Perform a tdnf update +- name: Perform a tdnf update # noqa: package-latest ansible.builtin.dnf: name: "*" state: latest diff --git a/images/capi/ansible/roles/setup/tasks/bootstrap-flatcar.yml b/images/capi/ansible/roles/setup/tasks/bootstrap-flatcar.yml index c070dcc9e9..8358f8dfcd 100644 --- a/images/capi/ansible/roles/setup/tasks/bootstrap-flatcar.yml +++ b/images/capi/ansible/roles/setup/tasks/bootstrap-flatcar.yml @@ -23,6 +23,6 @@ - name: Override Flatcar's OS family ansible.builtin.set_fact: ansible_os_family: Flatcar - when: ansible_os_family == "Flatcar Container Linux by Kinvolk" + when: ansible_facts['os_family'] == "Flatcar Container Linux by Kinvolk" tags: - facts diff --git a/images/capi/ansible/roles/setup/tasks/debian.yml b/images/capi/ansible/roles/setup/tasks/debian.yml index 41e0fd631e..f860fd09d4 100644 --- a/images/capi/ansible/roles/setup/tasks/debian.yml +++ b/images/capi/ansible/roles/setup/tasks/debian.yml @@ -21,7 +21,10 @@ # from this repo leads to build failures(especially in Arm), hence ignoring the step. 
# Ubuntu 24.04 has changed to deb822 source management # As a result the there is change in format source configurations and location - when: (packer_builder_type != "oracle-oci") and ((ansible_distribution == "Ubuntu") and (ansible_distribution_major_version is version('24', '<'))) + when: > + (packer_builder_type != "oracle-oci") and + ((ansible_facts['distribution'] == "Ubuntu") and + (ansible_facts['distribution_major_version'] is version('24', '<'))) - name: Put templated ubuntu.sources in place ansible.builtin.template: @@ -30,7 +33,7 @@ mode: "0644" # Ubuntu 24.04 has changed to deb822 source management # As a result the there is change in format source configurations and location - when: (ansible_distribution == "Ubuntu") and (ansible_distribution_major_version is version('24', '>=')) + when: (ansible_facts['distribution'] == "Ubuntu") and (ansible_facts['distribution_major_version'] is version('24', '>=')) - name: Put templated apt.conf.d/90proxy in place when defined ansible.builtin.template: @@ -80,7 +83,10 @@ when: packer_build_name is search('cvm') - name: Add '--no-tpm --no-efivars' to nullboot post install script - ansible.builtin.command: sed -i 's/nullbootctl/nullbootctl --no-tpm --no-efivars/' /var/lib/dpkg/info/nullboot.postinst + ansible.builtin.replace: + path: /var/lib/dpkg/info/nullboot.postinst + regexp: nullbootctl + replace: nullbootctl --no-tpm --no-efivars when: packer_build_name is search('cvm') - name: Perform a dist-upgrade @@ -93,7 +99,7 @@ retries: 5 delay: 10 -- name: Install baseline dependencies +- name: Install baseline dependencies # noqa: package-latest ansible.builtin.apt: force_apt_get: true update_cache: true @@ -104,7 +110,7 @@ retries: 5 delay: 10 -- name: Install extra debs +- name: Install extra debs # noqa: package-latest ansible.builtin.apt: force_apt_get: true name: "{{ extra_debs.split() }}" @@ -126,5 +132,8 @@ delay: 10 - name: Remove '--no-tpm --no-efivars' from nullboot post install script - 
ansible.builtin.command: sed -i 's/nullbootctl --no-tpm --no-efivars/nullbootctl/' /var/lib/dpkg/info/nullboot.postinst + ansible.builtin.replace: + path: /var/lib/dpkg/info/nullboot.postinst + regexp: nullbootctl --no-tpm --no-efivars + replace: nullbootctl when: packer_build_name is search('cvm') diff --git a/images/capi/ansible/roles/setup/tasks/flatcar.yml b/images/capi/ansible/roles/setup/tasks/flatcar.yml index 5db4e8504b..08b71fd724 100644 --- a/images/capi/ansible/roles/setup/tasks/flatcar.yml +++ b/images/capi/ansible/roles/setup/tasks/flatcar.yml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.include_tasks: bootstrap-flatcar.yml +- name: Include Flatcar bootstrap tasks + ansible.builtin.include_tasks: bootstrap-flatcar.yml - name: Create system-environment-generators directory ansible.builtin.file: @@ -20,6 +21,16 @@ state: directory mode: "0755" +# Flatcar Stable 4593.2.0+ ships without /etc/sysctl.d/ pre-created. The +# node role's sysctl tasks write to /etc/sysctl.d/99-sysctl.conf via the +# ansible.posix.sysctl module, which uses mkstemp in the parent directory +# and fails with FileNotFoundError when the directory is missing. +- name: Create /etc/sysctl.d directory + ansible.builtin.file: + path: /etc/sysctl.d + state: directory + mode: "0755" + - name: Add env generator that includes system PATH on service path ansible.builtin.copy: src: etc/systemd/system-environment-generators/10-flatcar-path diff --git a/images/capi/ansible/roles/setup/tasks/main.yml b/images/capi/ansible/roles/setup/tasks/main.yml index e4174217c1..71fd46752c 100644 --- a/images/capi/ansible/roles/setup/tasks/main.yml +++ b/images/capi/ansible/roles/setup/tasks/main.yml @@ -12,23 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: debian.yml - when: ansible_os_family == "Debian" +- name: Import Debian setup tasks + ansible.builtin.import_tasks: debian.yml + when: ansible_facts['os_family'] == "Debian" -- ansible.builtin.import_tasks: flatcar.yml - # This task overrides ansible_os_family to "Flatcar" as a workaround for +- name: Import Flatcar setup tasks + ansible.builtin.import_tasks: flatcar.yml + # This task overrides ansible_facts['os_family'] to "Flatcar" as a workaround for # regression between Flatcar and Ansible, so rest of the code can use just # "Flatcar" for comparison, which is the correct value. - when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + when: ansible_facts['os_family'] in ["Flatcar", "Flatcar Container Linux by Kinvolk"] -- ansible.builtin.import_tasks: azurelinux.yml - when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] +- name: Import Azure Linux setup tasks + ansible.builtin.import_tasks: azurelinux.yml + when: ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] -- ansible.builtin.import_tasks: redhat.yml - when: ansible_os_family == "RedHat" +- name: Import RedHat setup tasks + ansible.builtin.import_tasks: redhat.yml + when: ansible_facts['os_family'] == "RedHat" -- ansible.builtin.import_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" +- name: Import Photon setup tasks + ansible.builtin.import_tasks: photon.yml + when: ansible_facts['os_family'] == "VMware Photon OS" # Copy in pip config file when defined - name: Install pip config file diff --git a/images/capi/ansible/roles/setup/tasks/photon.yml b/images/capi/ansible/roles/setup/tasks/photon.yml index 5bc552245b..095b01cc2b 100644 --- a/images/capi/ansible/roles/setup/tasks/photon.yml +++ b/images/capi/ansible/roles/setup/tasks/photon.yml @@ -12,6 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- +# Pin the Python interpreter to the versionless symlink so that it +# survives tdnf distro-sync upgrading the Python version (e.g. 3.11→3.12), +# which removes the old versioned binary that Ansible auto-discovered. +- name: Pin ansible_python_interpreter to /usr/bin/python3 + ansible.builtin.set_fact: + ansible_python_interpreter: /usr/bin/python3 + - name: Add bash_profile ansible.builtin.template: dest: /home/builder/.bash_profile @@ -20,7 +27,8 @@ owner: builder group: builder -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml - name: Update the repos package to import the recent gpg keys ansible.builtin.command: tdnf update -y photon-repos --enablerepo=photon --refresh diff --git a/images/capi/ansible/roles/setup/tasks/redhat.yml b/images/capi/ansible/roles/setup/tasks/redhat.yml index 030e7f5cf3..6bf171aa2b 100644 --- a/images/capi/ansible/roles/setup/tasks/redhat.yml +++ b/images/capi/ansible/roles/setup/tasks/redhat.yml @@ -19,7 +19,7 @@ password: "{{ lookup('env', 'RHSM_PASS') }}" auto_attach: true when: - - ansible_distribution == "RedHat" + - ansible_facts['distribution'] == "RedHat" - lookup('env', 'RHSM_USER') | length > 0 - lookup('env', 'RHSM_PASS') | length > 0 @@ -33,7 +33,7 @@ when: packer_builder_type.startswith('amazon') - name: Install EPEL package - ansible.builtin.yum: + ansible.builtin.dnf: name: epel-release state: present when: packer_builder_type.startswith('amazon') @@ -45,28 +45,29 @@ when: epel_rpm_gpg_key != "" and not packer_builder_type.startswith('amazon') and not packer_builder_type.startswith('scaleway') - name: Add epel repo - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ redhat_epel_rpm }}" state: present lock_timeout: 60 when: redhat_epel_rpm != "" and not packer_builder_type.startswith('amazon') and not packer_builder_type.startswith('scaleway') -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + 
ansible.builtin.import_tasks: rpm_repos.yml -- name: Perform a yum update - ansible.builtin.yum: +- name: Perform a yum update # noqa: package-latest + ansible.builtin.dnf: name: "*" state: latest lock_timeout: 60 - name: Install baseline dependencies - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ rpms }}" state: present lock_timeout: 60 - name: Install extra rpms - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ extra_rpms.split() }}" state: present lock_timeout: 60 diff --git a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 index 012e7dd781..82dad028dd 100644 --- a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 +++ b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 @@ -1,11 +1,11 @@ Types: deb -URIs: http://us.archive.ubuntu.com/ubuntu/ -Suites: {{ ansible_distribution_release }} {{ ansible_distribution_release }}-updates {{ ansible_distribution_release }}-backports +URIs: {{ ubuntu_repo }} +Suites: {{ ansible_facts['distribution_release'] }} {{ ansible_facts['distribution_release'] }}-updates {{ ansible_facts['distribution_release'] }}-backports Components: main restricted universe multiverse Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg Types: deb -URIs: http://security.ubuntu.com/ubuntu/ -Suites: {{ ansible_distribution_release }}-security +URIs: {{ ubuntu_security_repo }} +Suites: {{ ansible_facts['distribution_release'] }}-security Components: main restricted universe multiverse Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg \ No newline at end of file diff --git a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.j2 b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.j2 index eaec06f851..4ba56f1a27 100644 --- a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.j2 +++ 
b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.j2 @@ -1,4 +1,4 @@ -deb {{ ubuntu_repo }} {{ ansible_distribution_release }} main restricted universe -deb {{ ubuntu_repo }} {{ ansible_distribution_release }}-updates main restricted universe -deb {{ ubuntu_repo }} {{ ansible_distribution_release }}-backports main restricted universe -deb {{ ubuntu_security_repo }} {{ ansible_distribution_release }}-security main restricted universe +deb {{ ubuntu_repo }} {{ ansible_facts['distribution_release'] }} main restricted universe +deb {{ ubuntu_repo }} {{ ansible_facts['distribution_release'] }}-updates main restricted universe +deb {{ ubuntu_repo }} {{ ansible_facts['distribution_release'] }}-backports main restricted universe +deb {{ ubuntu_security_repo }} {{ ansible_facts['distribution_release'] }}-security main restricted universe diff --git a/images/capi/ansible/roles/sysprep/defaults/main.yml b/images/capi/ansible/roles/sysprep/defaults/main.yml index 2babaa9807..33ac3fa677 100644 --- a/images/capi/ansible/roles/sysprep/defaults/main.yml +++ b/images/capi/ansible/roles/sysprep/defaults/main.yml @@ -17,3 +17,4 @@ pip_conf_file: "" remove_extra_repos: false flatcar_disable_autologin: false sysprep_require_grub_file: true +extra_kernel_boot_params: "" diff --git a/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml b/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml index 251dd74a89..311121bc35 100644 --- a/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml +++ b/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml @@ -24,7 +24,8 @@ ansible.builtin.set_fact: package_list: "{{ ansible_facts.packages.keys() | join(' ') }}" -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml - name: Ensure nftables config ends with a newline ansible.builtin.shell: /bin/echo "" >> /etc/sysconfig/nftables.conf @@ -36,7 +37,7 @@ name: swap.target enabled: false masked: true - when: 
ansible_memory_mb.swap.total != 0 + when: ansible_facts['memory_mb'].swap.total != 0 - name: Remove the kickstart log ansible.builtin.file: diff --git a/images/capi/ansible/roles/sysprep/tasks/debian.yml b/images/capi/ansible/roles/sysprep/tasks/debian.yml index 6f5b7e933e..fd3c4163e6 100644 --- a/images/capi/ansible/roles/sysprep/tasks/debian.yml +++ b/images/capi/ansible/roles/sysprep/tasks/debian.yml @@ -71,7 +71,7 @@ ansible.builtin.service: name: rsyslog state: stopped - when: "'rsyslog' in services" + when: "'rsyslog' in ansible_facts.services" - name: Remove apt package caches ansible.builtin.apt: @@ -119,9 +119,9 @@ path: /usr/sbin/update-grub register: _stat_update_grub -- name: Configure grub for non graphical consoles - ansible.builtin.copy: - src: etc/default/grub.d/50-cloudimg-settings.cfg +- name: Configure grub for non graphical consoles and add user extra kernel boot params + ansible.builtin.template: + src: templates/etc/default/grub.d/50-cloudimg-settings.cfg dest: /etc/default/grub.d/50-cloudimg-settings.cfg group: root owner: root @@ -136,28 +136,28 @@ ansible.builtin.file: path: /etc/cloud/cloud.cfg.d/subiquity-disable-cloudinit-networking.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') - name: Removing 99-installer.cfg which sets the cloud-init datasource to None ansible.builtin.file: path: /etc/cloud/cloud.cfg.d/99-installer.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') - name: Removing subiquity curtin preserve sources config ansible.builtin.file: path: /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') - name: Removing cloud-init ds identify config ansible.builtin.file: path: 
/etc/cloud/ds-identify.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') - name: Removing 90-installer-network.cfg installer network configuration ansible.builtin.file: path: /etc/cloud/cloud.cfg.d/90-installer-network.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') diff --git a/images/capi/ansible/roles/sysprep/tasks/main.yml b/images/capi/ansible/roles/sysprep/tasks/main.yml index 8782e98775..1c7402a1a8 100644 --- a/images/capi/ansible/roles/sysprep/tasks/main.yml +++ b/images/capi/ansible/roles/sysprep/tasks/main.yml @@ -12,20 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: debian.yml - when: ansible_os_family == "Debian" +- name: Import Debian sysprep tasks + ansible.builtin.import_tasks: debian.yml + when: ansible_facts['os_family'] == "Debian" -- ansible.builtin.import_tasks: flatcar.yml - when: ansible_os_family == "Flatcar" +- name: Import Flatcar sysprep tasks + ansible.builtin.import_tasks: flatcar.yml + when: ansible_facts['os_family'] == "Flatcar" -- ansible.builtin.import_tasks: redhat.yml - when: ansible_os_family == "RedHat" +- name: Import RedHat sysprep tasks + ansible.builtin.import_tasks: redhat.yml + when: ansible_facts['os_family'] == "RedHat" -- ansible.builtin.import_tasks: azurelinux.yml - when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] +- name: Import Azure Linux sysprep tasks + ansible.builtin.import_tasks: azurelinux.yml + when: ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] -- ansible.builtin.import_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" +- name: Import Photon sysprep tasks + ansible.builtin.import_tasks: photon.yml + when: 
ansible_facts['os_family'] == "VMware Photon OS" - name: Remove containerd http proxy conf file if needed ansible.builtin.file: @@ -51,7 +56,7 @@ loop: - { path: /etc/machine-id, state: absent, mode: "{{ machine_id_mode }}" } - { path: /etc/machine-id, state: touch, mode: "{{ machine_id_mode }}" } - when: ansible_os_family not in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linuz"] + when: ansible_facts['os_family'] not in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux"] - name: Truncate hostname file ansible.builtin.file: @@ -67,7 +72,9 @@ - name: Set hostname ansible.builtin.hostname: name: localhost.local - when: ansible_os_family not in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux", "VMware Photon OS"] and packer_build_name != "nutanix" + when: > + ansible_facts['os_family'] not in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux", "VMware Photon OS"] + and packer_build_name != "nutanix" - name: Reset hosts file ansible.builtin.copy: @@ -104,13 +111,13 @@ ansible.builtin.shell: cmd: | cloud-init clean --machine-id - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Reset cloud-init ansible.builtin.shell: cmd: | cloud-init clean - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Remove cloud-init.disabled ansible.builtin.file: @@ -164,7 +171,7 @@ src: files/etc/netplan/51-kubevirt-netplan.yaml dest: /etc/netplan/51-kubevirt-netplan.yaml mode: "0644" - when: ansible_os_family == "Debian" and kubevirt == "true" + when: ansible_facts['os_family'] == "Debian" and kubevirt == "true" - name: Find SSH host keys ansible.builtin.find: @@ -184,8 +191,8 @@ path: "{{ item.path }}" loop: - { path: /root/.ssh/authorized_keys } - - { path: "/home/{{ ansible_env.SUDO_USER | 
default(ansible_facts['user_id']) }}/.ssh/authorized_keys" } + when: ansible_facts['os_family'] != "Flatcar" - name: Remove SSH authorized users for Flatcar ansible.builtin.file: @@ -193,7 +200,7 @@ path: "{{ item.path }}" loop: - { path: /root/.ssh/authorized_keys } - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Truncate all remaining log files in /var/log ansible.builtin.shell: | @@ -202,13 +209,13 @@ args: executable: /bin/bash - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Delete all logrotated logs ansible.builtin.shell: cmd: | find /var/log -type f -regex '.*[0-9z]$' -exec rm {} + - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Remove swapfile ansible.builtin.file: @@ -218,7 +225,7 @@ - /swap.img - /swapfile - /mnt/resource/swapfile - when: ansible_memory_mb.swap.total != 0 + when: ansible_facts['memory_mb'].swap.total != 0 - name: Truncate shell history ansible.builtin.file: @@ -226,13 +233,13 @@ path: "{{ item.path }}" loop: - { path: /root/.bash_history } - - { path: "/home/{{ ansible_env.SUDO_USER | default(ansible_user_id) }}/.bash_history" } + - { path: "/home/{{ ansible_facts['env'].SUDO_USER | default(ansible_facts['user_id']) }}/.bash_history" } - name: Rotate journalctl to archive logs ansible.builtin.shell: cmd: | journalctl --rotate - when: not ( ansible_os_family == "RedHat" and ansible_distribution_major_version|int <= 7 ) + when: not ( ansible_facts['os_family'] == "RedHat" and ansible_facts['distribution_major_version']|int <= 7 ) - name: Remove archived journalctl logs ansible.builtin.shell: @@ -246,22 +253,22 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Remove any default Ignition files used by Packer ansible.builtin.file: state: absent path: /usr/share/oem/config.ign - when: ansible_os_family == "Flatcar" + 
when: ansible_facts['os_family'] == "Flatcar" - name: Start fstrim ansible.builtin.systemd: name: fstrim.service state: started - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Start ssh ansible.builtin.systemd: name: ssh enabled: true - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/sysprep/tasks/photon.yml b/images/capi/ansible/roles/sysprep/tasks/photon.yml index 41f77836e1..fba507dda5 100644 --- a/images/capi/ansible/roles/sysprep/tasks/photon.yml +++ b/images/capi/ansible/roles/sysprep/tasks/photon.yml @@ -41,7 +41,8 @@ regexp: ^excludepkgs= line: excludepkgs={{ package_list }} -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml - name: Update the repos package to import the recent gpg keys ansible.builtin.command: tdnf update -y photon-repos --enablerepo=photon --refresh diff --git a/images/capi/ansible/roles/sysprep/tasks/redhat.yml b/images/capi/ansible/roles/sysprep/tasks/redhat.yml index 1b8870d9ec..78ab8670c4 100644 --- a/images/capi/ansible/roles/sysprep/tasks/redhat.yml +++ b/images/capi/ansible/roles/sysprep/tasks/redhat.yml @@ -29,35 +29,22 @@ path: /etc/yum.conf regexp: ^exclude= line: exclude={{ package_list }} - when: ansible_distribution != "Amazon" or ansible_distribution_version != "2023" + when: ansible_facts['distribution'] != "Amazon" or ansible_facts['distribution_version'] != "2023" - name: Exclude packages from upgrade ansible.builtin.lineinfile: path: /etc/dnf/dnf.conf regexp: ^excludepkgs= line: excludepkgs={{ package_list }} - when: ansible_distribution == "Amazon" and ansible_distribution_version == "2023" + when: ansible_facts['distribution'] == "Amazon" and ansible_facts['distribution_version'] == "2023" -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml -# 
Oracle Linux does not have temp-disk-swapfile service -- name: Disable swap service and ensure it is masked - ansible.builtin.systemd: - name: temp-disk-swapfile - enabled: false - masked: true - when: ansible_memory_mb.swap.total != 0 and ansible_distribution_major_version|int <= 7 - -- name: Disable swap service and ensure it is masked on RHEL 8 - ansible.builtin.systemd: - name: swap.target - enabled: false - masked: true - when: ansible_memory_mb.swap.total != 0 and ansible_distribution_major_version|int == 8 - name: Remove RHEL subscription when: - - ansible_distribution == "RedHat" + - ansible_facts['distribution'] == "RedHat" - lookup('env', 'RHSM_USER') | length > 0 - lookup('env', 'RHSM_PASS') | length > 0 block: @@ -74,7 +61,7 @@ ansible.builtin.command: subscription-manager clean - name: Remove yum package caches - ansible.builtin.yum: + ansible.builtin.dnf: autoremove: true lock_timeout: 60 @@ -85,15 +72,15 @@ ansible.builtin.shell: | set -o pipefail sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - when: packer_builder_type != "googlecompute" and ansible_distribution_major_version|int != 9 + when: packer_builder_type != "googlecompute" and ansible_facts['distribution_major_version']|int != 9 - name: Migrate interface configuration files to NetworkManager keyfiles ansible.builtin.command: nmcli connection migrate - when: packer_builder_type != "googlecompute" and ansible_distribution_major_version|int == 9 + when: packer_builder_type != "googlecompute" and ansible_facts['distribution_major_version']|int == 9 - name: Reset network interface IDs ansible.builtin.shell: sed -i '/^\(uuid\)=/d' /etc/NetworkManager/system-connections/*.nmconnection - when: packer_builder_type != "googlecompute" and ansible_distribution_major_version|int == 9 + when: packer_builder_type != "googlecompute" and ansible_facts['distribution_major_version']|int == 9 - name: Remove the kickstart log ansible.builtin.file: diff --git 
a/images/capi/ansible/roles/sysprep/files/etc/default/grub.d/50-cloudimg-settings.cfg b/images/capi/ansible/roles/sysprep/templates/etc/default/grub.d/50-cloudimg-settings.cfg similarity index 77% rename from images/capi/ansible/roles/sysprep/files/etc/default/grub.d/50-cloudimg-settings.cfg rename to images/capi/ansible/roles/sysprep/templates/etc/default/grub.d/50-cloudimg-settings.cfg index d36c0b180d..9751434d32 100644 --- a/images/capi/ansible/roles/sysprep/files/etc/default/grub.d/50-cloudimg-settings.cfg +++ b/images/capi/ansible/roles/sysprep/templates/etc/default/grub.d/50-cloudimg-settings.cfg @@ -8,7 +8,7 @@ GRUB_RECORDFAIL_TIMEOUT=0 GRUB_TIMEOUT=0 # Set the default commandline -GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0" +GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0 {{ extra_kernel_boot_params }}" # Set the grub console type GRUB_TERMINAL=console diff --git a/images/capi/ansible/windows/ansible_winrm.ps1 b/images/capi/ansible/windows/ansible_winrm.ps1 index 04ba0997fa..33ec89d678 100644 --- a/images/capi/ansible/windows/ansible_winrm.ps1 +++ b/images/capi/ansible/windows/ansible_winrm.ps1 @@ -12,11 +12,29 @@ # See the License for the specific language governing permissions and # limitations under the License. -# This file is from packer documentation: +# This file is from packer documentation: # https://www.packer.io/docs/provisioners/ansible.html#winrm-communicator # https://www.packer.io/docs/builders/amazon/ebs#connecting-to-windows-instances-using-winrm -Set-ExecutionPolicy Unrestricted -Scope LocalMachine -Force -ErrorAction Ignore +# Log execution policies at all scopes for diagnostics +Write-Output "Current execution policy settings:" +Get-ExecutionPolicy -List | Format-Table -AutoSize | Out-String | Write-Output + +# Only set execution policy if the current effective policy is more restrictive +# than what we need. Policies like Bypass or Unrestricted are already sufficient. 
+$currentPolicy = Get-ExecutionPolicy +$sufficientPolicies = @('Bypass', 'Unrestricted') +if ($currentPolicy -notin $sufficientPolicies) { + Write-Output "Effective execution policy '$currentPolicy' is insufficient, setting to Unrestricted" + try { + Set-ExecutionPolicy Unrestricted -Scope LocalMachine -Force -ErrorAction Stop + Write-Output "Execution policy set to Unrestricted" + } catch { + Write-Output "Failed to set execution policy: $_" + } +} else { + Write-Output "Effective execution policy '$currentPolicy' is sufficient, skipping Set-ExecutionPolicy" +} # Don't set this before Set-ExecutionPolicy as it throws an error $ErrorActionPreference = "stop" @@ -43,6 +61,12 @@ cmd.exe /c winrm set "winrm/config/service/auth" '@{CredSSP="true"}' cmd.exe /c winrm set "winrm/config/listener?Address=*+Transport=HTTPS" "@{Port=`"5986`";Hostname=`"packer`";CertificateThumbprint=`"$($Cert.Thumbprint)`"}" cmd.exe /c netsh advfirewall firewall set rule group="remote administration" new enable=yes cmd.exe /c netsh firewall add portopening TCP 5986 "Port 5986" -cmd.exe /c net stop winrm cmd.exe /c sc config winrm start= auto -cmd.exe /c net start winrm + +# Restart WinRM via a scheduled task so the current session can finish +# cleanly before the service cycles. This prevents Packer's WinRM +# connection (which is running this script) from being severed mid-flight. 
+$taskAction = New-ScheduledTaskAction -Execute "powershell.exe" -Argument "-Command Restart-Service winrm -Force" +$taskTrigger = New-ScheduledTaskTrigger -Once -At ((Get-Date).AddSeconds(5)) +Register-ScheduledTask -TaskName "RestartWinRM" -Action $taskAction -Trigger $taskTrigger -User "SYSTEM" -RunLevel Highest -Force +write-output "Scheduled WinRM restart in 5 seconds" diff --git a/images/capi/ansible/windows/example.vars.yml b/images/capi/ansible/windows/example.vars.yml index 1c63e24901..397669fb82 100644 --- a/images/capi/ansible/windows/example.vars.yml +++ b/images/capi/ansible/windows/example.vars.yml @@ -20,7 +20,7 @@ gmsa_keyvault_url: https://kubernetesartifacts.azureedge.net/ccgakvplugin/v1.1.4 runtime: containerd kubernetes_install_path: c:\k windows_service_manager: nssm -pause_image: registry.k8s.io/pause:3.10 +pause_image: registry.k8s.io/pause:3.10.2 load_additional_components: true additional_registry_images: true additional_registry_images_list: sigwindowstools/kube-proxy:v1.28.4-calico-hostprocess, sigwindowstools/calico-node:v3.26.1-hostprocess diff --git a/images/capi/ansible/windows/roles/cloudbase-init/tasks/main.yml b/images/capi/ansible/windows/roles/cloudbase-init/tasks/main.yml index 93893e4616..45674a2458 100644 --- a/images/capi/ansible/windows/roles/cloudbase-init/tasks/main.yml +++ b/images/capi/ansible/windows/roles/cloudbase-init/tasks/main.yml @@ -49,5 +49,5 @@ Remove-Item -Force {{ systemdrive.stdout | trim }}\Windows\Setup\Scripts\SetupComplete.cmd & "{{ programfiles.stdout | trim }}\Cloudbase Solutions\Cloudbase-Init\bin\SetSetupComplete.cmd" become: true - become_method: runas + become_method: ansible.builtin.runas become_user: System diff --git a/images/capi/ansible/windows/roles/gmsa/tasks/main.yml b/images/capi/ansible/windows/roles/gmsa/tasks/main.yml index 09fed92655..b07aea40fd 100644 --- a/images/capi/ansible/windows/roles/gmsa/tasks/main.yml +++ b/images/capi/ansible/windows/roles/gmsa/tasks/main.yml @@ -12,5 +12,6 
@@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: gmsa_keyvault.yml +- name: Import gMSA KeyVault tasks + ansible.builtin.import_tasks: gmsa_keyvault.yml when: gmsa_keyvault | bool diff --git a/images/capi/ansible/windows/roles/kubernetes/tasks/kubelet.yml b/images/capi/ansible/windows/roles/kubernetes/tasks/kubelet.yml index bbdbb795a6..fac13a7d5b 100644 --- a/images/capi/ansible/windows/roles/kubernetes/tasks/kubelet.yml +++ b/images/capi/ansible/windows/roles/kubernetes/tasks/kubelet.yml @@ -28,10 +28,12 @@ ansible.windows.win_shell: New-Item -path $env:SystemDrive\var\lib\kubelet\etc\kubernetes\pki -type SymbolicLink -value $env:SystemDrive\etc\kubernetes\pki\ -Force when: kubernetes_semver is version('v1.23.0', '<') -- ansible.builtin.import_tasks: nssm.yml +- name: Import NSSM kubelet tasks + ansible.builtin.import_tasks: nssm.yml when: windows_service_manager == "nssm" -- ansible.builtin.import_tasks: sc.yml +- name: Import Windows service kubelet tasks + ansible.builtin.import_tasks: sc.yml when: windows_service_manager == "windows_service" # Dependency selection: https://www.reddit.com/r/ansible/comments/imfdgn/setting_a_variable_conditionally/g41anaf/?utm_source=reddit&utm_medium=web2x&context=3 diff --git a/images/capi/ansible/windows/roles/kubernetes/tasks/main.yml b/images/capi/ansible/windows/roles/kubernetes/tasks/main.yml index 09ae48791f..4084d1ec75 100644 --- a/images/capi/ansible/windows/roles/kubernetes/tasks/main.yml +++ b/images/capi/ansible/windows/roles/kubernetes/tasks/main.yml @@ -17,7 +17,8 @@ path: "{{ kubernetes_install_path }}" state: directory -- ansible.builtin.import_tasks: url.yml +- name: Import Kubernetes URL tasks + ansible.builtin.import_tasks: url.yml - name: Add kubernetes folder to path ansible.windows.win_path: @@ -25,4 +26,5 @@ - "{{ kubernetes_install_path }}" scope: Machine -- ansible.builtin.import_tasks: kubelet.yml +- name: 
Import kubelet tasks + ansible.builtin.import_tasks: kubelet.yml diff --git a/images/capi/ansible/windows/roles/kubernetes/tasks/sc.yml b/images/capi/ansible/windows/roles/kubernetes/tasks/sc.yml index 3fa42c5785..0c528abfa5 100644 --- a/images/capi/ansible/windows/roles/kubernetes/tasks/sc.yml +++ b/images/capi/ansible/windows/roles/kubernetes/tasks/sc.yml @@ -19,7 +19,7 @@ ansible.windows.win_service: name: kubelet start_mode: auto - path: > + path: >- "{{ kubernetes_install_path }}\kube-log-runner.exe" --log-file={{ systemdrive.stdout | trim }}/var/log/kubelet/kubelet.log {{ kubernetes_install_path }}\kubelet.exe --windows-service --cert-dir={{ systemdrive.stdout | trim }}/var/lib/kubelet/pki diff --git a/images/capi/ansible/windows/roles/load_additional_components/tasks/main.yml b/images/capi/ansible/windows/roles/load_additional_components/tasks/main.yml index 639c8cfde8..04cf2fa236 100644 --- a/images/capi/ansible/windows/roles/load_additional_components/tasks/main.yml +++ b/images/capi/ansible/windows/roles/load_additional_components/tasks/main.yml @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: executables.yml +- name: Import additional executables tasks + ansible.builtin.import_tasks: executables.yml when: additional_executables | bool -- ansible.builtin.import_tasks: registry.yml +- name: Import additional registry images tasks + ansible.builtin.import_tasks: registry.yml when: additional_registry_images | bool -- ansible.builtin.import_tasks: url.yml +- name: Import additional URL images tasks + ansible.builtin.import_tasks: url.yml when: additional_url_images | bool diff --git a/images/capi/ansible/windows/roles/providers/tasks/azure.yml b/images/capi/ansible/windows/roles/providers/tasks/azure.yml index c67063500c..0bd0ae7b09 100644 --- a/images/capi/ansible/windows/roles/providers/tasks/azure.yml +++ b/images/capi/ansible/windows/roles/providers/tasks/azure.yml @@ -30,7 +30,7 @@ $r = New-NetFirewallRule -DisplayName 'Block-Outbound-168.63.129.16-port-80-for-cve-2021-27075' -Direction Outbound -RemoteAddress '168.63.129.16' -RemotePort '80' -Protocol TCP -Action Block $r | Get-NetFirewallSecurityFilter | Set-NetFirewallSecurityFilter -LocalUser "O:LSD:(D;;CC;;;S-1-5-18)(D;;CC;;;$($wsg.SID.Value))(A;;CC;;;S-1-1-0)" become: true - become_method: runas + become_method: ansible.builtin.runas become_user: SYSTEM - name: Add users to WireServerAccessGroup diff --git a/images/capi/ansible/windows/roles/providers/tasks/main.yml b/images/capi/ansible/windows/roles/providers/tasks/main.yml index f59b4b326c..0740b232f0 100644 --- a/images/capi/ansible/windows/roles/providers/tasks/main.yml +++ b/images/capi/ansible/windows/roles/providers/tasks/main.yml @@ -9,8 +9,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.include_tasks: azure.yml +- name: Include Azure provider tasks + ansible.builtin.include_tasks: azure.yml when: packer_builder_type.startswith('azure') -- ansible.builtin.include_tasks: vmware.yml +- name: Include VMware provider tasks + ansible.builtin.include_tasks: vmware.yml when: packer_builder_type is search('vmware') or packer_builder_type is search('vsphere') diff --git a/images/capi/ansible/windows/roles/runtimes/defaults/main.yml b/images/capi/ansible/windows/roles/runtimes/defaults/main.yml index e5056ef2ac..407480bdb9 100644 --- a/images/capi/ansible/windows/roles/runtimes/defaults/main.yml +++ b/images/capi/ansible/windows/roles/runtimes/defaults/main.yml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -pause_image: mcr.microsoft.com/oss/kubernetes/pause:3.10 +pause_image: mcr.microsoft.com/oss/v2/kubernetes/pause:3.10.1-3 containerd_additional_settings: "" containerd_config_file: config.toml diff --git a/images/capi/ansible/windows/roles/runtimes/tasks/main.yml b/images/capi/ansible/windows/roles/runtimes/tasks/main.yml index 80eececa8f..891ff49dde 100644 --- a/images/capi/ansible/windows/roles/runtimes/tasks/main.yml +++ b/images/capi/ansible/windows/roles/runtimes/tasks/main.yml @@ -12,5 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: containerd.yml +- name: Import containerd runtime tasks + ansible.builtin.import_tasks: containerd.yml when: runtime == "containerd" diff --git a/images/capi/ansible/windows/roles/runtimes/templates/config.toml b/images/capi/ansible/windows/roles/runtimes/templates/config.toml index 87d306362b..dcb70ded14 100644 --- a/images/capi/ansible/windows/roles/runtimes/templates/config.toml +++ b/images/capi/ansible/windows/roles/runtimes/templates/config.toml @@ -25,6 +25,10 @@ imports = ["{{ containerd_conf_dir }}\\conf.d\\*.toml"] [plugins] {% if containerd_version is version('2.0.0', '>=') %} +{% if containerd_image_pull_progress_timeout | default('') | length > 0 %} + [plugins.'io.containerd.cri.v1.images'] + image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}" +{% endif %} [plugins.'io.containerd.cri.v1.images'.pinned_images] sandbox = "{{ pause_image }}" [plugins.'io.containerd.cri.v1.runtime'.cni] diff --git a/images/capi/ansible/windows/roles/systemprep/tasks/main.yml b/images/capi/ansible/windows/roles/systemprep/tasks/main.yml index 4e27026869..6118096284 100644 --- a/images/capi/ansible/windows/roles/systemprep/tasks/main.yml +++ b/images/capi/ansible/windows/roles/systemprep/tasks/main.yml @@ -97,10 +97,12 @@ reboot: true when: windows_updates_category_names|length > 0 -- ansible.builtin.import_tasks: ssh-feature.yml +- name: Import SSH feature tasks + ansible.builtin.import_tasks: ssh-feature.yml when: ssh_source_url == "" -- ansible.builtin.import_tasks: ssh-archive.yml +- name: Import SSH archive tasks + ansible.builtin.import_tasks: ssh-archive.yml when: ssh_source_url != "" - name: Set default SSH shell to Powershell @@ -133,6 +135,16 @@ type: dword when: distribution_version == "2022" +# VPF changes to reduce lock contention +- name: Apply networking fix for Windows 2025 + ansible.windows.win_regedit: + path: HKLM:\SYSTEM\CurrentControlSet\Policies\Microsoft\FeatureManagement\Overrides + state: 
present + name: 520997518 + data: 1 + type: dword + when: distribution_version == "2025" + # Apply HNS flags for fixes that need to be enabled via Registry # these eventually get turned on automatically and can be removed in future releases - name: Apply HNS control Flags 0x40 and 0x10 in 2022-11B patches diff --git a/images/capi/ansible/windows/roles/systemprep/tasks/ssh-feature.yml b/images/capi/ansible/windows/roles/systemprep/tasks/ssh-feature.yml index 5c841ac096..5bb543345f 100644 --- a/images/capi/ansible/windows/roles/systemprep/tasks/ssh-feature.yml +++ b/images/capi/ansible/windows/roles/systemprep/tasks/ssh-feature.yml @@ -18,5 +18,5 @@ - name: Install OpenSSH ansible.windows.win_shell: Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0 become: true - become_method: runas + become_method: ansible.builtin.runas become_user: SYSTEM diff --git a/images/capi/azure_targets.sh b/images/capi/azure_targets.sh index 88a428dad0..2f03cbe7bc 100644 --- a/images/capi/azure_targets.sh +++ b/images/capi/azure_targets.sh @@ -1,4 +1,4 @@ -VHD_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 rhel-8 windows-2019-containerd windows-2022-containerd" -SIG_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 rhel-8 windows-2019-containerd windows-2022-containerd windows-2025-containerd flatcar" +VHD_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 windows-2022-containerd" +SIG_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 windows-2022-containerd windows-2025-containerd flatcar" SIG_GEN2_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 flatcar" -SIG_CVM_TARGETS="ubuntu-2204 ubuntu-2404 windows-2019-containerd windows-2022-containerd" +SIG_CVM_TARGETS="ubuntu-2204 ubuntu-2404 windows-2022-containerd" diff --git a/images/capi/hack/ensure-ansible-lint.sh b/images/capi/hack/ensure-ansible-lint.sh index a876a64c37..b194c29e13 100755 --- a/images/capi/hack/ensure-ansible-lint.sh +++ b/images/capi/hack/ensure-ansible-lint.sh @@ -22,7 +22,7 @@ set -o pipefail source hack/utils.sh 
-_version="6.21.1" +_version="25.2.0" # Change directories to the parent directory of the one in which this # script is located. diff --git a/images/capi/hack/ensure-ansible.sh b/images/capi/hack/ensure-ansible.sh index 8c2cff4357..f767f68019 100755 --- a/images/capi/hack/ensure-ansible.sh +++ b/images/capi/hack/ensure-ansible.sh @@ -49,7 +49,7 @@ fi echo ${ansible_version[*]} ansible-galaxy collection install \ - community.general \ - ansible.posix \ + 'community.general:<=12.0.0' \ + 'ansible.posix' \ 'ansible.windows:>=1.7.0' \ community.windows diff --git a/images/capi/hack/ensure-ct.sh b/images/capi/hack/ensure-butane.sh similarity index 73% rename from images/capi/hack/ensure-ct.sh rename to images/capi/hack/ensure-butane.sh index b58474acb1..40f9a3e67d 100755 --- a/images/capi/hack/ensure-ct.sh +++ b/images/capi/hack/ensure-butane.sh @@ -20,7 +20,7 @@ set -o pipefail [[ -n ${DEBUG:-} ]] && set -o xtrace -_version="v0.9.3" +_version="v0.25.1" # Change directories to the parent directory of the one in which this # script is located. 
@@ -33,11 +33,11 @@ if command -v ct >/dev/null 2>&1; then exit 0; fi mkdir -p .local/bin && cd .local/bin if [[ ${HOSTOS} == "linux" ]]; then - _binfile="ct-${_version}-x86_64-unknown-linux-gnu" + _binfile="butane-x86_64-unknown-linux-gnu" elif [[ ${HOSTOS} == "darwin" ]]; then - _binfile="ct-${_version}-x86_64-apple-darwin" + _binfile="butane-x86_64-apple-darwin" fi -_bin_url="https://github.com/flatcar/container-linux-config-transpiler/releases/download/${_version}/${_binfile}" -curl -SsL "${_bin_url}" -o ct -chmod 0755 ct -echo "'ct' has been installed to $(pwd), make sure this directory is in your \$PATH" +_bin_url="https://github.com/coreos/butane/releases/download/${_version}/${_binfile}" +curl -SsL "${_bin_url}" -o butane +chmod 0755 butane +echo "'butane' has been installed to $(pwd), make sure this directory is in your \$PATH" diff --git a/images/capi/hack/image-build-ova.py b/images/capi/hack/image-build-ova.py index 78ba124cb4..4c7f622b25 100755 --- a/images/capi/hack/image-build-ova.py +++ b/images/capi/hack/image-build-ova.py @@ -108,9 +108,6 @@ def main(): vmdk = vmdk_files[0] OS_id_map = {"vmware-photon-64": {"id": "36", "version": "", "type": "vmwarePhoton64Guest"}, - "centos7-64": {"id": "107", "version": "7", "type": "centos7_64Guest"}, - "centos8-64": {"id": "107", "version": "8", "type": "centos8_64Guest"}, - "rhel8-64": {"id": "80", "version": "8", "type": "rhel8_64Guest"}, "rhel9-64": {"id": "80", "version": "9", "type": "rhel9_64Guest"}, "rockylinux-64": {"id": "80", "version": "", "type": "rockylinux_64Guest"}, "ubuntu-64": {"id": "94", "version": "", "type": "ubuntu64Guest"}, diff --git a/images/capi/hack/utils.sh b/images/capi/hack/utils.sh index df2db68397..090133cf9f 100755 --- a/images/capi/hack/utils.sh +++ b/images/capi/hack/utils.sh @@ -14,8 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Note: ansible-core v2.16.x requires Python >= 3.10. 
-_version_ansible_core="2.15.13" +# Note: ansible-core v2.18 supports Python 3.11-3.13. +_version_ansible_core="2.18.15" case "${OSTYPE}" in linux*) @@ -107,6 +107,9 @@ pip3_install() { ensure_py3 if output=$(pip3 install --disable-pip-version-check --user "${@}" 2>&1); then echo "$output" + elif [[ $output == *"Can not perform a '--user' install"* ]]; then + >&2 echo "warning: '--user' install failed, retrying pip3 install without --user" + pip3 install --disable-pip-version-check "${@}" elif [[ $output == *"error: externally-managed-environment"* ]]; then >&2 echo "warning: externally-managed-environment, retrying pip3 install with --break-system-packages" pip3 install --disable-pip-version-check --user --break-system-packages "${@}" diff --git a/images/capi/packer/ami/flatcar-arm64.json b/images/capi/packer/ami/flatcar-arm64.json index 0849746a97..76f3823073 100644 --- a/images/capi/packer/ami/flatcar-arm64.json +++ b/images/capi/packer/ami/flatcar-arm64.json @@ -6,7 +6,6 @@ "arch": "arm64", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}", "builder_instance_type": "t4g.small", - "crictl_source_type": "http", "distribution": "flatcar", "goss_arch": "arm64", "kubernetes_cni_source_type": "http", @@ -18,5 +17,6 @@ "sysusr_prefix": "/opt", "sysusrlocal_prefix": "/opt", "user_data": "", - "user_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json" + "user_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json", + "volume_size": "15" } diff --git a/images/capi/packer/ami/flatcar.json b/images/capi/packer/ami/flatcar.json index fb7a1b7688..71f6c56fdf 100644 --- a/images/capi/packer/ami/flatcar.json +++ b/images/capi/packer/ami/flatcar.json @@ -5,7 +5,6 @@ "ansible_extra_vars": "ansible_python_interpreter=/opt/bin/python", "arch": "amd64", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distribution": "flatcar", "kubernetes_cni_source_type": "http", "kubernetes_source_type": "http", @@ -16,5 +15,6 @@ 
"sysusr_prefix": "/opt", "sysusrlocal_prefix": "/opt", "user_data": "", - "user_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json" + "user_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json", + "volume_size": "15" } diff --git a/images/capi/packer/ami/packer-windows.json b/images/capi/packer/ami/packer-windows.json index d2ed468ded..bdc6723098 100644 --- a/images/capi/packer/ami/packer-windows.json +++ b/images/capi/packer/ami/packer-windows.json @@ -126,6 +126,7 @@ "vars_inline": { "OS": "{{user `distribution` | lower}}", "PROVIDER": "amazon", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "distribution_version": "{{user `distribution_version`}}", "kubernetes_version": "{{user `kubernetes_semver`}}", @@ -177,13 +178,14 @@ "cloudbase_metadata_services_unattend": "cloudbaseinit.metadata.services.base.EmptyMetadataService", "cloudbase_plugins": "cloudbaseinit.plugins.windows.createuser.CreateUserPlugin, cloudbaseinit.plugins.common.setuserpassword.SetUserPasswordPlugin, cloudbaseinit.plugins.windows.extendvolumes.ExtendVolumesPlugin, cloudbaseinit.plugins.common.userdata.UserDataPlugin, cloudbaseinit.plugins.common.ephemeraldisk.EphemeralDiskPlugin, cloudbaseinit.plugins.common.mtu.MTUPlugin, cloudbaseinit.plugins.common.sethostname.SetHostNamePlugin, cloudbaseinit.plugins.common.sshpublickeys.SetUserSSHPublicKeysPlugin", "cloudbase_plugins_unattend": "cloudbaseinit.plugins.common.mtu.MTUPlugin", + "containerd_image_pull_progress_timeout": null, "containerd_version": null, "encrypted": "false", "iam_instance_profile": "", "ib_version": "{{env `IB_VERSION`}}", "image_name": "capa-ami-{{user `build_name`}}-{{user `kubernetes_semver`}}-{{user `build_timestamp`}}", "kms_key_id": "", - "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user 
`kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", "manifest_output": "manifest.json", "nssm_url": null, "prepull": null, diff --git a/images/capi/packer/ami/packer.json b/images/capi/packer/ami/packer.json index 04f6b3054d..1ca76fe352 100644 --- a/images/capi/packer/ami/packer.json +++ b/images/capi/packer/ami/packer.json @@ -131,6 +131,7 @@ "PROVIDER": "amazon", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -164,6 +165,7 @@ "builder_instance_type": "t3.small", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/ami/rhel-8.json b/images/capi/packer/ami/rhel-8.json deleted file mode 100644 index ea506ccb7a..0000000000 --- a/images/capi/packer/ami/rhel-8.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "ami_filter_arch": "x86_64", - "ami_filter_name": "RHEL-8.6.0_HVM-*", - "ami_filter_owners": "309956199498", - "arch": "amd64", - "build_name": "rhel-8", - "builder_instance_type": "m5.large", - "distribution": "rhel", - "distribution_release": "Enterprise", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "redhat_epel_rpm": 
"https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "root_device_name": "/dev/sda1", - "source_ami": "", - "ssh_username": "ec2-user", - "volume_size": "10" -} diff --git a/images/capi/packer/ami/rockylinux-8.json b/images/capi/packer/ami/rockylinux-8.json deleted file mode 100644 index cc2c494720..0000000000 --- a/images/capi/packer/ami/rockylinux-8.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "ami_filter_arch": "x86_64", - "ami_filter_name": "Rocky-8-ec2-8.5-*", - "ami_filter_owners": "679593333241", - "arch": "amd64", - "build_name": "rockylinux-8", - "distribution": "rockylinux", - "distribution_release": "Core", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "root_device_name": "/dev/sda1", - "source_ami": "", - "ssh_username": "rocky", - "volume_size": "10" -} diff --git a/images/capi/packer/azure/.pipelines/build-sig.yaml b/images/capi/packer/azure/.pipelines/build-sig.yaml deleted file mode 100644 index eb4f744e7c..0000000000 --- a/images/capi/packer/azure/.pipelines/build-sig.yaml +++ /dev/null @@ -1,80 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. 
`1.31.1` -# - OS - operating system distro, such as 'Ubuntu', 'AzureLinux', or `Windows` -# - OS_VERSION - version of distro, such as `24.04` or `2022-containerd` -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI -# Optional pipeline variables: -# - JOB_NAME - name of the job, defaults to `image-builder-sig-${OS}-${OS_VERSION}` -# - PACKER_FLAGS - additional flags to pass to packer -# - RESOURCE_GROUP - name of the Azure resource group to use for the compute galleries -# - STAGING_GALLERY_NAME - name of the Azure compute gallery for initial image publishing -# - TAGS - tags to apply to the image - -jobs: -- job: build_sig - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - template: k8s-config.yaml - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - kube_proxy_url="sigwindowstools/kube-proxy:v${KUBERNETES_VERSION/+/_}-calico-hostprocess" - echo "Checking for Windows kube-proxy image $kube_proxy_url" - if ! 
stderr="$(docker pull $kube_proxy_url 2>&1 > /dev/null)"; then - # It's a Windows image, so expect an error after pulling it on Linux - if [[ $stderr != *"cannot be used on this platform"* ]]; then - echo "Failed to pull kube-proxy image: $stderr" - exit 1 - fi - fi - displayName: Check for Windows kube-proxy image - condition: and(eq(variables['PREFLIGHT_CHECKS'], 'true'), eq(variables['OS'], 'Windows')) - - task: AzureCLI@2 - displayName: Build SIG Image - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - # Generate locales properly on Azure Linux or ansible will complain - sudo tdnf -y install glibc-i18n - sudo locale-gen.sh - export LC_ALL=en_US.UTF-8 - - os=$(echo "${OS}" | tr '[:upper:]' '[:lower:]') - version=$(echo "${OS_VERSION}" | tr '[:upper:]' '[:lower:]' | tr -d .) - export RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" - export RESOURCE_GROUP_NAME="${RESOURCE_GROUP}" - - # timestamp is in RFC-3339 format to match kubetest - export TIMESTAMP="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - export JOB_NAME="${JOB_NAME:-"image-builder-sig-${os}-${version}"}" - export TAGS="${TAGS:-creationTimestamp=${TIMESTAMP} jobName=${JOB_NAME} DO-NOT-DELETE=UpstreamInfra}" - printf "${TAGS}" | tee packer/azure/tags.out - export GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" - DISTRO="${os}-${version}" - echo "##vso[task.setvariable variable=DISTRO]$DISTRO" - - # Add build tags in ADO - echo "##vso[build.addbuildtag]$KUBERNETES_VERSION" - echo "##vso[build.addbuildtag]$DISTRO" - - export PACKER_FLAGS="${PACKER_FLAGS} --var sig_image_version=${KUBERNETES_VERSION}" - export PATH=$PATH:$HOME/.local/bin - export USE_AZURE_CLI_AUTH="True" - make build-azure-sig-$os-$version | tee packer/azure/packer.out - - template: sig-publishing-info.yaml - - task: 
PublishPipelineArtifact@1 - inputs: - artifact: 'publishing-info' - path: '$(system.defaultWorkingDirectory)/images/capi/packer/azure/sig-publishing-info.json' diff --git a/images/capi/packer/azure/.pipelines/clean-sig.yaml b/images/capi/packer/azure/.pipelines/clean-sig.yaml deleted file mode 100644 index 04e58d384a..0000000000 --- a/images/capi/packer/azure/.pipelines/clean-sig.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI -# Optional pipeline variables: -# - RESOURCE_GROUP - name of the Azure resource group to use for the compute galleries, defaults to "cluster-api-gallery" -# - STAGING_GALLERY_NAME - name of the Azure compute gallery for initial image publishing, defaults to "staging_gallery" - -jobs: -- job: clean_sig - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - task: DownloadPipelineArtifact@2 - inputs: - source: current - artifact: publishing-info - path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/ - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - MANAGED_IMAGE_ID=$(jq -r .managed_image_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_NAME=$(jq -r .shared_image_gallery_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(jq -r .shared_image_gallery_image_version $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - - echo "##vso[task.setvariable variable=MANAGED_IMAGE_ID]$MANAGED_IMAGE_ID" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_NAME]$SHARED_IMAGE_GALLERY_IMAGE_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_VERSION]$SHARED_IMAGE_GALLERY_IMAGE_VERSION" - displayName: 
Import variables from build SIG job - - task: AzureCLI@2 - displayName: Clean up staging resources - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" - RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" - - # Delete the source managed image if it exists - if az image show --ids ${MANAGED_IMAGE_ID} -o none 2>/dev/null; then - az image delete --ids ${MANAGED_IMAGE_ID} - fi - - # Delete the staging image version if it exists - if az sig image-version show --resource-group ${RESOURCE_GROUP} --gallery-name ${GALLERY_NAME} --gallery-image-definition ${SHARED_IMAGE_GALLERY_IMAGE_NAME} --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} -o none 2>/dev/null; then - az sig image-version delete \ - --resource-group ${RESOURCE_GROUP} \ - --gallery-name ${GALLERY_NAME} \ - --gallery-image-definition ${SHARED_IMAGE_GALLERY_IMAGE_NAME} \ - --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} - fi diff --git a/images/capi/packer/azure/.pipelines/k8s-config.yaml b/images/capi/packer/azure/.pipelines/k8s-config.yaml deleted file mode 100644 index b9ded3ffa6..0000000000 --- a/images/capi/packer/azure/.pipelines/k8s-config.yaml +++ /dev/null @@ -1,10 +0,0 @@ -steps: -- script: | - KUBERNETES_RELEASE=$(echo ${KUBERNETES_VERSION} | cut -d "." 
-f -2) - sed -i "s/^ \"kubernetes_series\".*/ \"kubernetes_series\": \"v${KUBERNETES_RELEASE}\",/g" kubernetes.json - sed -i "s/^ \"kubernetes_semver\".*/ \"kubernetes_semver\": \"v${KUBERNETES_VERSION}\",/g" kubernetes.json - sed -i "s/^ \"kubernetes_rpm_version\".*/ \"kubernetes_rpm_version\": \"${KUBERNETES_VERSION}\",/g" kubernetes.json - sed -i "s/^ \"kubernetes_deb_version\".*/ \"kubernetes_deb_version\": \"${KUBERNETES_VERSION}-1.1\",/g" kubernetes.json - cat kubernetes.json - displayName: Write configuration files - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi/packer/config' diff --git a/images/capi/packer/azure/.pipelines/promote-sig.yaml b/images/capi/packer/azure/.pipelines/promote-sig.yaml deleted file mode 100644 index d99d7ed1a5..0000000000 --- a/images/capi/packer/azure/.pipelines/promote-sig.yaml +++ /dev/null @@ -1,144 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI -# - EULA_LINK - the URL to the EULA for the image -# - PUBLISHER_EMAIL - the email for the image publisher -# - PUBLISHER_URI - the URI for the image publisher -# - SIG_PUBLISHER - the publisher for the image definition -# Optional pipeline variables: -# - GALLERY_DESCRIPTION - the description for the image gallery, defaults to `Shared image gallery for Cluster API Provider Azure` -# - GALLERY_NAME - name of the Azure community gallery for final image publishing, defaults to `community_gallery` -# - PUBLIC_NAME_PREFIX - the prefix for the community gallery name, defaults to `ClusterAPI` -# - REPLICATED_REGIONS - the regions to replicate the image to, defaults to the location of the managed image -# - RESOURCE_GROUP - name of the Azure resource group to use for the compute galleries, defaults to `cluster-api-gallery` -# - SIG_OFFER - the name of the offer to attach to image definitions, defaults to `reference-images` - -jobs: -- job: 
publish_to_sig - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - task: DownloadPipelineArtifact@2 - inputs: - source: current - artifact: publishing-info - path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/ - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - DISTRO=$(jq -r .distro $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - HYPERV_GEN=$(jq -r .hyperv_gen $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - OS_TYPE=$(jq -r .os_type $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(jq -r .managed_image_resource_group_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_NAME=$(jq -r .managed_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_ID=$(jq -r .managed_image_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_LOCATION=$(jq -r .managed_image_location $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(jq -r .managed_image_shared_image_gallery_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(jq -r .shared_image_gallery_resource_group $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_NAME=$(jq -r .shared_image_gallery_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_NAME=$(jq -r .shared_image_gallery_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(jq -r 
.shared_image_gallery_image_version $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - TAGS=$(jq -r .tags $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - - set +o xtrace - echo "##vso[task.setvariable variable=DISTRO]$DISTRO" - echo "##vso[task.setvariable variable=HYPERV_GEN]$HYPERV_GEN" - echo "##vso[task.setvariable variable=OS_TYPE]$OS_TYPE" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_RESOURCE_GROUP_NAME]$MANAGED_IMAGE_RESOURCE_GROUP_NAME" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_NAME]$MANAGED_IMAGE_NAME" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_ID]$MANAGED_IMAGE_ID" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_LOCATION]$MANAGED_IMAGE_LOCATION" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID]$MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_RESOURCE_GROUP]$SHARED_IMAGE_GALLERY_RESOURCE_GROUP" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_NAME]$SHARED_IMAGE_GALLERY_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_NAME]$SHARED_IMAGE_GALLERY_IMAGE_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_VERSION]$SHARED_IMAGE_GALLERY_IMAGE_VERSION" - echo "##vso[task.setvariable variable=TAGS]$TAGS" - displayName: Import variables from build SIG job - - task: AzureCLI@2 - displayName: Publish to community gallery - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - EOL_DATE=$(date --date='+6 months' +"%Y-%m-%dT00:00:00+00:00") - GALLERY_DESCRIPTION=${GALLERY_DESCRIPTION:-"Shared image gallery for Cluster API Provider Azure"} - GALLERY_NAME=${GALLERY_NAME:-community_gallery} - 
PUBLIC_NAME_PREFIX=${PUBLIC_NAME_PREFIX:-ClusterAPI} - REPLICATED_REGIONS="${REPLICATED_REGIONS:-${MANAGED_IMAGE_LOCATION} australiaeast canadacentral eastus eastus2 eastus2euap francecentral germanywestcentral northeurope switzerlandnorth uksouth westeurope}" - RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" - SIG_OFFER="${SIG_OFFER:-reference-images}" - - # Create the resource group if needed - if ! az group show -n ${RESOURCE_GROUP} -o none 2>/dev/null; then - az group create -n ${RESOURCE_GROUP} -l ${MANAGED_IMAGE_LOCATION} --tags ${TAGS:-} - fi - - # Create the public community shared image gallery if it doesn't exist - if ! az sig show --gallery-name ${GALLERY_NAME} --resource-group ${RESOURCE_GROUP} -o none 2>/dev/null; then - az sig create \ - --gallery-name ${GALLERY_NAME} \ - --resource-group ${RESOURCE_GROUP} \ - --description ${GALLERY_DESCRIPTION} \ - --eula ${EULA_LINK} \ - --location ${MANAGED_IMAGE_LOCATION} \ - --public-name-prefix ${PUBLIC_NAME_PREFIX} \ - --publisher-email ${PUBLISHER_EMAIL} \ - --publisher-uri ${PUBLISHER_URI} \ - --tags ${TAGS} \ - --permissions Community - fi - - # translate prohibited words to alternatives in the image definition name - GALLERY_IMAGE_DEFINITION=${SHARED_IMAGE_GALLERY_IMAGE_NAME//ubuntu/ubun2} - GALLERY_IMAGE_DEFINITION=${GALLERY_IMAGE_DEFINITION//windows/win} - # Create image definition if it doesn't exist - if ! 
az sig image-definition show --gallery-name ${GALLERY_NAME} --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} --resource-group ${RESOURCE_GROUP} -o none 2>/dev/null; then - az sig image-definition create \ - --resource-group ${RESOURCE_GROUP} \ - --gallery-name ${GALLERY_NAME} \ - --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} \ - --publisher ${SIG_PUBLISHER} \ - --offer ${SIG_OFFER} \ - --sku ${DISTRO} \ - --hyper-v-generation ${HYPERV_GEN} \ - --os-type ${OS_TYPE} \ - | tee -a sig-publishing.json - fi - - # Delete the image version if it exists (always create a new image, overwriting if necessary) - if az sig image-version show --gallery-name ${GALLERY_NAME} --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} --resource-group ${RESOURCE_GROUP} -o none 2>/dev/null; then - az sig image-version delete \ - --resource-group ${RESOURCE_GROUP} \ - --gallery-name ${GALLERY_NAME} \ - --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} \ - --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} - fi - - # Copy the tags from the managed image to the image version - IMAGE_TAGS=$(az tag list --resource-id ${MANAGED_IMAGE_ID} | jq -r '.properties.tags | to_entries | map("\(.key)=\(.value)") | join(" ")') - - # Create the image version - az sig image-version create \ - --resource-group ${RESOURCE_GROUP} \ - --gallery-name ${GALLERY_NAME} \ - --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} \ - --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} \ - --target-regions ${REPLICATED_REGIONS} \ - --managed-image "${MANAGED_IMAGE_ID}" \ - --end-of-life-date ${EOL_DATE} \ - --tags ${IMAGE_TAGS} \ - | tee -a sig-publishing.json - - task: PublishPipelineArtifact@1 - inputs: - artifact: 'sig-publishing' - path: '$(system.defaultWorkingDirectory)/images/capi/sig-publishing.json' diff --git a/images/capi/packer/azure/.pipelines/sig-publishing-info.yaml 
b/images/capi/packer/azure/.pipelines/sig-publishing-info.yaml deleted file mode 100644 index 6f805f3fe6..0000000000 --- a/images/capi/packer/azure/.pipelines/sig-publishing-info.yaml +++ /dev/null @@ -1,42 +0,0 @@ -steps: -- script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - PACKER_OUTPUT=packer/azure/packer.out - OS_TYPE=$(sed -n 's/^OSType: \(.*\)/\1/p' $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(sed -n "s/^ManagedImageResourceGroupName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_NAME=$(sed -n "s/^ManagedImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_ID=$(sed -n "s/^ManagedImageId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_LOCATION=$(sed -n "s/^ManagedImageLocation: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(sed -n "s/^ManagedImageSharedImageGalleryId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(sed -n "s/^SharedImageGalleryResourceGroup: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - SHARED_IMAGE_GALLERY_NAME=$(sed -n "s/^SharedImageGalleryName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - SHARED_IMAGE_GALLERY_IMAGE_NAME=$(sed -n "s/^SharedImageGalleryImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(sed -n "s/^SharedImageGalleryImageVersion: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - TAGS=$(cat packer/azure/tags.out) - if [[ SHARED_IMAGE_GALLERY_IMAGE_NAME == *gen2 ]]; then - HYPERV_GEN="V2" - else - HYPERV_GEN="V1" - fi - - cat < packer/azure/sig-publishing-info.json - { - "distro": "${DISTRO}", - "hyperv_gen": "${HYPERV_GEN}", - "os_type": "${OS_TYPE}", - "managed_image_resource_group_name": "${MANAGED_IMAGE_RESOURCE_GROUP_NAME}", - "managed_image_name": "${MANAGED_IMAGE_NAME}", - "managed_image_id": "${MANAGED_IMAGE_ID}", - "managed_image_location": "${MANAGED_IMAGE_LOCATION}", - "managed_image_shared_image_gallery_id": "${MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID}", - 
"shared_image_gallery_resource_group": "${SHARED_IMAGE_GALLERY_RESOURCE_GROUP}", - "shared_image_gallery_name": "${SHARED_IMAGE_GALLERY_NAME}", - "shared_image_gallery_image_name": "${SHARED_IMAGE_GALLERY_IMAGE_NAME}", - "shared_image_gallery_image_version": "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}", - "tags": "${TAGS}" - } - EOF - displayName: Generate SIG publishing info - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' diff --git a/images/capi/packer/azure/.pipelines/stages.yaml b/images/capi/packer/azure/.pipelines/stages.yaml deleted file mode 100644 index a40cb27dfd..0000000000 --- a/images/capi/packer/azure/.pipelines/stages.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.31.1` -# - OFFER - the name of the offer to create the sku for -# - OS - target of build, one of `Ubuntu` or `Windows` -# - OS_VERSION - target of build, one of `24.04`, `22.04`, `2022-containerd`, or `2019-containerd` -# - PUBLISHER - the name of the publisher to create the sku for -# - RESOURCE_GROUP - name of the Azure resource group to use for the Compute galleries -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI -# - STAGING_GALLERY_NAME - name of the Azure Compute Gallery for initial image publishing - -trigger: none -pr: none - -stages: - - stage: build - jobs: - - template: build-sig.yaml - - - stage: test - condition: not(always()) # skip for now - jobs: - - template: test-sig.yaml - - - stage: promote - condition: not(or(failed(), canceled())) - jobs: - - template: promote-sig.yaml - - - stage: clean - condition: always() - jobs: - - template: clean-sig.yaml diff --git a/images/capi/packer/azure/.pipelines/test-sig.yaml b/images/capi/packer/azure/.pipelines/test-sig.yaml deleted file mode 100644 index 2b89276cea..0000000000 --- a/images/capi/packer/azure/.pipelines/test-sig.yaml +++ 
/dev/null @@ -1,142 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - AZ_CAPI_EXTENSION_URL - URL to the Azure CAPI extension build. -# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.16.2` -# - OS - target of build e.g. `Ubuntu/Windows` -# - OS_VERSION - target of build e.g. `22.04/2004/2019` -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI - -jobs: -- job: test_sig - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - task: DownloadPipelineArtifact@2 - inputs: - source: current - artifact: publishing-info - path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/ - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - OS_TYPE=$(jq -r .os_type $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(jq -r .managed_image_resource_group_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_NAME=$(jq -r .managed_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_ID=$(jq -r .managed_image_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_LOCATION=$(jq -r .managed_image_location $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(jq -r .managed_image_shared_image_gallery_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(jq -r .shared_image_gallery_resource_group $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_NAME=$(jq -r .shared_image_gallery_name 
$(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_NAME=$(jq -r .shared_image_gallery_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(jq -r .shared_image_gallery_image_version $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_REPLICATED_REGIONS=$(jq -r .shared_image_gallery_replicated_regions $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - TAGS=$(jq -r .tags $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - - echo "##vso[task.setvariable variable=OS_TYPE;]$OS_TYPE" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_RESOURCE_GROUP_NAME;]$MANAGED_IMAGE_RESOURCE_GROUP_NAME" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_NAME;]$MANAGED_IMAGE_NAME" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_ID;]$MANAGED_IMAGE_ID" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_LOCATION;]$MANAGED_IMAGE_LOCATION" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID;]$MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_RESOURCE_GROUP;]$SHARED_IMAGE_GALLERY_RESOURCE_GROUP" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_NAME;]$SHARED_IMAGE_GALLERY_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_NAME;]$SHARED_IMAGE_GALLERY_IMAGE_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_VERSION;]$SHARED_IMAGE_GALLERY_IMAGE_VERSION" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_REPLICATED_REGIONS;]$SHARED_IMAGE_GALLERY_REPLICATED_REGIONS" - echo "##vso[task.setvariable variable=TAGS;]$TAGS" - displayName: Import variables from build SIG job - - template: k8s-config.yaml - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && 
set -o xtrace - - export PATH=${PATH}:.local/bin - ./packer/azure/scripts/ensure-kustomize.sh - - # Generate cluster template with kustomize - if [ "$OS_TYPE" == "Windows" ]; then - kustomize build --load-restrictor LoadRestrictionsNone $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/windows/ > $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml - else - kustomize build --load-restrictor LoadRestrictionsNone $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/linux/ > $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml - fi - TEST_TEMPLATE=$(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml - echo "##vso[task.setvariable variable=TEST_TEMPLATE;]$TEST_TEMPLATE" - displayName: generate cluster template - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - - task: PipAuthenticate@1 - inputs: - artifactFeeds: 'AzureContainerUpstream' - onlyAddExtraIndex: true - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - os=$(echo "$OS_TYPE" | tr '[:upper:]' '[:lower:]') - - # Set up the Azure CLI Cluster API extension - # For example, https://github.com/Azure/azure-capi-cli-extension/releases/download/az-capi-nightly/capi-0.0.vnext-py2.py3-none-any.whl - az extension add --yes --source "${AZ_CAPI_EXTENSION_URL}" - - # Install required binaries - mkdir ~/test-binaries - export PATH=${PATH}:~/test-binaries - az capi install -a -ip ~/test-binaries - - echo "##vso[task.setvariable variable=PATH;]$PATH" - displayName: Install and configure az capi extension - - task: AzureCLI@2 - displayName: Create a cluster - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo pipefail - [[ -n 
${DEBUG:-} ]] && set -o xtrace - - params=() - if [ "$OS_TYPE" == "Windows" ]; then - params+=(--windows) - fi - - RESOURCE_GROUP=${MANAGED_IMAGE_RESOURCE_GROUP_NAME} - AZURE_LOCATION=${MANAGED_IMAGE_LOCATION} - # Create a cluster - az capi create \ - --yes \ - --debug \ - --name testvm \ - --kubernetes-version="${KUBERNETES_VERSION}" \ - --location="${AZURE_LOCATION}" \ - --resource-group="${RESOURCE_GROUP}" \ - --management-cluster-resource-group-name="${RESOURCE_GROUP}" \ - --control-plane-machine-count=1 \ - --node-machine-count=1 \ - --template="${TEST_TEMPLATE}" \ - --tags="${TAGS}" \ - --wait-for-nodes=2 \ - "${params[@]}" - - # test if the vm's provisionState is "Succeeded" otherwise fail - # even though the node is reporting Ready, it still takes a moment for the Azure VM to go to Succeeded - timeout 60s bash -c "while ! az vm list -g ${RESOURCE_GROUP} | jq -e 'all(.provisioningState == \"Succeeded\")'; do sleep 1; done" - - task: AzureCLI@2 - displayName: Clean up test resource group - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - # Clean up the test resource group - RESOURCE_GROUP=${MANAGED_IMAGE_RESOURCE_GROUP_NAME} - echo az group delete -n "${RESOURCE_GROUP}" --yes --no-wait - condition: always() diff --git a/images/capi/packer/azure/azurelinux-3-gen2.json b/images/capi/packer/azure/azurelinux-3-gen2.json index e10bf2671f..217f4d145d 100644 --- a/images/capi/packer/azure/azurelinux-3-gen2.json +++ b/images/capi/packer/azure/azurelinux-3-gen2.json @@ -5,5 +5,6 @@ "distribution_version": "3", "image_offer": "azure-linux-3", "image_publisher": "MicrosoftCBLMariner", - "image_sku": "azure-linux-3-gen2" + "image_sku": "azure-linux-3-gen2", + "os_disk_size_gb": "20" } diff --git a/images/capi/packer/azure/azurelinux-3.json 
b/images/capi/packer/azure/azurelinux-3.json index bafd90c669..f8dfd3752b 100644 --- a/images/capi/packer/azure/azurelinux-3.json +++ b/images/capi/packer/azure/azurelinux-3.json @@ -5,5 +5,6 @@ "distribution_version": "3", "image_offer": "azure-linux-3", "image_publisher": "MicrosoftCBLMariner", - "image_sku": "azure-linux-3" + "image_sku": "azure-linux-3", + "os_disk_size_gb": "20" } diff --git a/images/capi/packer/azure/flatcar.json b/images/capi/packer/azure/flatcar.json index 392e43f647..98b256a1d1 100644 --- a/images/capi/packer/azure/flatcar.json +++ b/images/capi/packer/azure/flatcar.json @@ -1,7 +1,6 @@ { "ansible_extra_vars": "ansible_python_interpreter=/opt/pypy/bin/pypy", "build_name": "flatcar", - "crictl_source_type": "http", "custom_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json", "distribution": "flatcar", "distribution_release": "{{env `FLATCAR_CHANNEL`}}", diff --git a/images/capi/packer/azure/packer-windows.json b/images/capi/packer/azure/packer-windows.json index 482c3b2644..2acc35d04c 100644 --- a/images/capi/packer/azure/packer-windows.json +++ b/images/capi/packer/azure/packer-windows.json @@ -139,7 +139,7 @@ "--extra-vars", "gmsa_keyvault_url={{user `gmsa_keyvault_url`}}" ], - "max_retries": 5, + "max_retries": 1, "pause_before": "15s", "playbook_file": "ansible/windows/node_windows.yml", "type": "ansible", @@ -177,6 +177,7 @@ "vars_inline": { "OS": "{{user `distribution` | lower}}", "PROVIDER": "azure", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "distribution_version": "{{user `distribution_version`}}", "kubernetes_version": "{{user `kubernetes_semver`}}", @@ -217,6 +218,7 @@ "cloudbase_plugins": "cloudbaseinit.plugins.windows.createuser.CreateUserPlugin, cloudbaseinit.plugins.common.setuserpassword.SetUserPasswordPlugin, cloudbaseinit.plugins.windows.extendvolumes.ExtendVolumesPlugin, 
cloudbaseinit.plugins.common.userdata.UserDataPlugin, cloudbaseinit.plugins.common.ephemeraldisk.EphemeralDiskPlugin, cloudbaseinit.plugins.windows.azureguestagent.AzureGuestAgentPlugin, cloudbaseinit.plugins.common.mtu.MTUPlugin, cloudbaseinit.plugins.common.sethostname.SetHostNamePlugin", "cloudbase_plugins_unattend": "cloudbaseinit.plugins.common.mtu.MTUPlugin", "community_gallery_image_id": "", + "containerd_image_pull_progress_timeout": null, "containerd_version": null, "direct_shared_gallery_image_id": "", "exclude_from_latest": "false", @@ -226,7 +228,7 @@ "image_publisher": "", "image_sku": "", "image_version": "latest", - "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user `kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", "manifest_output": "manifest.json", "nssm_url": null, "os_disk_size_gb": "", diff --git a/images/capi/packer/azure/packer.json b/images/capi/packer/azure/packer.json index ecffb13e2f..84c55d70b8 100644 --- a/images/capi/packer/azure/packer.json +++ b/images/capi/packer/azure/packer.json @@ -69,7 +69,6 @@ }, "polling_duration_timeout": "60m", "private_virtual_network_with_public_ip": "{{user `private_virtual_network_with_public_ip`}}", - "public_ip_sku": "Standard", "shared_gallery_image_version_exclude_from_latest": "{{ user `exclude_from_latest` }}", "shared_image_gallery": { "community_gallery_image_id": "{{ user `community_gallery_image_id` }}", @@ -181,6 +180,7 @@ "PROVIDER": "azure", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "containerd_wasm_shims_runtimes": "{{user 
`containerd_wasm_shims_runtimes` }}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", @@ -222,6 +222,7 @@ "community_gallery_image_id": "", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "containerd_wasm_shims_runtimes": null, diff --git a/images/capi/packer/azure/rhel-8.json b/images/capi/packer/azure/rhel-8.json deleted file mode 100644 index 6796dc42f9..0000000000 --- a/images/capi/packer/azure/rhel-8.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "build_name": "rhel-8", - "distribution": "rhel", - "distribution_release": "rhel-8", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "image_offer": "RHEL", - "image_publisher": "RedHat", - "image_sku": "8_7", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm" -} diff --git a/images/capi/packer/azure/scripts/init-sig.sh b/images/capi/packer/azure/scripts/init-sig.sh index 730c55b943..4fde65c3d7 100755 --- a/images/capi/packer/azure/scripts/init-sig.sh +++ b/images/capi/packer/azure/scripts/init-sig.sh @@ -58,71 +58,6 @@ SECURITY_TYPE_CVM_SUPPORTED_FEATURE="SecurityType=ConfidentialVmSupported" SIG_TARGET=$1 -# Accept Azure VM image terms if available and required -accept_image_terms() { - # SIG_TARGET is expected to be a global variable - if [[ -z "$SIG_TARGET" ]]; then - echo "Error: SIG_TARGET is not set. Exiting." - exit 1 - fi - # AZURE_LOCATION is expected to be a global variable - if [[ -z "$AZURE_LOCATION" ]]; then - echo "Error: AZURE_LOCATION is not set. Exiting." 
- exit 1 - fi - - # Resolve the JSON file path and extract necessary fields - target_json="$(realpath "packer/azure/${SIG_TARGET}.json")" - distribution=$(jq -r '.distribution' "$target_json") - distribution_version=$(jq -r '.distribution_version' "$target_json") - - # Return early if not a Windows distribution - if [[ "$distribution" != "windows" ]]; then - return - fi - - # Extract purchase plan details - plan_publisher=$(jq -r '.plan_image_publisher' "$target_json") - plan_offer=$(jq -r '.plan_image_offer' "$target_json") - plan_name=$(jq -r '.plan_image_sku' "$target_json") - plan_version=${PLAN_VERSION:-"latest"} - - # Proceed only if all plan details are valid - if [[ "$plan_publisher" == "null" || "$plan_offer" == "null" || "$plan_name" == "null" ]]; then - echo "Purchase plan details are missing. Skipping terms acceptance." - return - fi - - # Populate the global plan_args variable - PLAN_ARGS=( - --plan-name "${plan_name}" - --plan-product "${plan_offer}" - --plan-publisher "${plan_publisher}" - ) - - plan_urn="${plan_publisher}:${plan_offer}:${plan_name}:${plan_version}" - - # Check if the image has terms to accept - if [[ "$(az vm image show --location "$AZURE_LOCATION" --urn "${plan_urn}" -o json | jq -r '.plan')" == "null" ]]; then - echo "Image '${plan_urn}' has no terms to accept." - return - fi - - echo "Plan info: ${plan_urn}" - - # Check acceptance status and accept terms if not already accepted - if [[ "$(az vm image terms show --urn "$plan_urn" -o json | jq -r '.accepted')" == "true" ]]; then - echo "Terms for image URN: ${plan_urn} are already accepted." - return - fi - - echo "Accepting terms for image URN: ${plan_urn}" - az vm image terms accept --urn "$plan_urn" -} - -PLAN_ARGS=() -accept_image_terms - # Create a shared image gallery image definition if it does not exist create_image_definition() { if ! 
az sig image-definition show --gallery-name ${GALLERY_NAME} --gallery-image-definition ${SIG_IMAGE_DEFINITION:-capi-${SIG_SKU:-$1}} --resource-group ${RESOURCE_GROUP_NAME} -o none 2>/dev/null; then @@ -135,8 +70,7 @@ create_image_definition() { --sku ${SIG_SKU:-$2} \ --hyper-v-generation ${3} \ --os-type ${4} \ - --features ${5:-''} \ - "${plan_args[@]}" # TODO: Delete this line after the image is GA + --features ${5:-''} fi } @@ -150,9 +84,6 @@ case ${SIG_TARGET} in azurelinux-3) create_image_definition ${SIG_TARGET} "azurelinux-3" "V1" "Linux" ;; - rhel-8) - create_image_definition "rhel-8" "rhel-8" "V1" "Linux" - ;; windows-2019-containerd) create_image_definition ${SIG_TARGET} "win-2019-containerd" "V1" "Windows" ;; diff --git a/images/capi/packer/azure/scripts/sysprep.ps1 b/images/capi/packer/azure/scripts/sysprep.ps1 index a540be4c7b..48a7e6eae5 100644 --- a/images/capi/packer/azure/scripts/sysprep.ps1 +++ b/images/capi/packer/azure/scripts/sysprep.ps1 @@ -16,8 +16,13 @@ # The Windows Azure Guest Agent is required for sysprep: https://www.packer.io/docs/builders/azure/arm#windows Write-Output '>>> Waiting for GA Service (RdAgent) to start ...' while ((Get-Service RdAgent).Status -ne 'Running') { Start-Sleep -s 5 } -Write-Output '>>> Waiting for GA Service (WindowsAzureTelemetryService) to start ...' -while ((Get-Service WindowsAzureTelemetryService) -and ((Get-Service WindowsAzureTelemetryService).Status -ne 'Running')) { Start-Sleep -s 5 } +$telemetryService = Get-Service WindowsAzureTelemetryService -ErrorAction SilentlyContinue +if ($telemetryService) { + Write-Output '>>> Waiting for GA Service (WindowsAzureTelemetryService) to start ...' + while ($telemetryService.Status -ne 'Running') { Start-Sleep -s 5; $telemetryService.Refresh() } +} else { + Write-Output '>>> GA Service (WindowsAzureTelemetryService) not installed, skipping ...' +} Write-Output '>>> Waiting for GA Service (WindowsAzureGuestAgent) to start ...' 
while ((Get-Service WindowsAzureGuestAgent).Status -ne 'Running') { Start-Sleep -s 5 } Write-Output '>>> Sysprepping VM ...' diff --git a/images/capi/packer/azure/scripts/test-templates/linux/kustomization.yaml b/images/capi/packer/azure/scripts/test-templates/linux/kustomization.yaml deleted file mode 100644 index 26815281e7..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/linux/kustomization.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: -- https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/download/v1.13.0/cluster-template.yaml -patches: -- path: ../patches/azuremachinetemplate-controlplane.yaml -- path: ../patches/azuremachinetemplate-workload.yaml diff --git a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-controlplane.yaml b/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-controlplane.yaml deleted file mode 100644 index 53e8216b04..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-controlplane.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-control-plane - namespace: default -spec: - template: - spec: - image: - id: ${MANAGED_IMAGE_ID} ---- \ No newline at end of file diff --git a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-windows.yaml b/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-windows.yaml deleted file mode 100644 index 2abc8847cc..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-windows.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-md-win - namespace: default -spec: - template: - spec: - image: - id: ${MANAGED_IMAGE_ID} ---- \ No 
newline at end of file diff --git a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-workload.yaml b/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-workload.yaml deleted file mode 100644 index 1bc33e3d7f..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-workload.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-md-0 - namespace: default -spec: - template: - spec: - image: - id: ${MANAGED_IMAGE_ID} ---- \ No newline at end of file diff --git a/images/capi/packer/azure/scripts/test-templates/patches/kubeadmcontrolplane-windows.yaml b/images/capi/packer/azure/scripts/test-templates/patches/kubeadmcontrolplane-windows.yaml deleted file mode 100644 index 8a040a4348..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/kubeadmcontrolplane-windows.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 -kind: KubeadmControlPlane -metadata: - name: ${CLUSTER_NAME}-control-plane - namespace: default -spec: - version: ${KUBERNETES_BOOTSTRAP_VERSION} ---- \ No newline at end of file diff --git a/images/capi/packer/azure/scripts/test-templates/patches/machinedeployment-windows.yaml b/images/capi/packer/azure/scripts/test-templates/patches/machinedeployment-windows.yaml deleted file mode 100644 index 1c66ce2fd3..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/machinedeployment-windows.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineDeployment -metadata: - name: ${CLUSTER_NAME}-md-0 - namespace: default -spec: - replicas: 0 ---- \ No newline at end of file diff --git a/images/capi/packer/azure/scripts/test-templates/windows/kustomization.yaml b/images/capi/packer/azure/scripts/test-templates/windows/kustomization.yaml deleted file mode 100644 index 
0ea6f474e6..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/windows/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: -- https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/download/v1.13.0/cluster-template-windows.yaml -patches: -- path: ../patches/azuremachinetemplate-windows.yaml -- path: ../patches/kubeadmcontrolplane-windows.yaml -- path: ../patches/machinedeployment-windows.yaml diff --git a/images/capi/packer/config/ansible-args.json b/images/capi/packer/config/ansible-args.json index 0ba049e72f..b4f74f877f 100644 --- a/images/capi/packer/config/ansible-args.json +++ b/images/capi/packer/config/ansible-args.json @@ -1,5 +1,5 @@ { "ansible_common_ssh_args": "-o IdentitiesOnly=yes", - "ansible_common_vars": "containerd_gvisor_runtime={{user `containerd_gvisor_runtime`}} containerd_gvisor_version={{user `containerd_gvisor_version`}} containerd_sha256={{user `containerd_sha256`}} pause_image={{user `pause_image`}} containerd_additional_settings={{user `containerd_additional_settings`}} containerd_cri_socket={{user `containerd_cri_socket`}} containerd_version={{user `containerd_version`}} containerd_wasm_shims_url={{user `containerd_wasm_shims_url`}} containerd_wasm_shims_version={{user `containerd_wasm_shims_version`}} containerd_wasm_shims_sha256={{user `containerd_wasm_shims_sha256`}} containerd_wasm_shims_runtimes=\"{{user `containerd_wasm_shims_runtimes`}}\" containerd_wasm_shims_runtime_versions=\"{{user `containerd_wasm_shims_runtime_versions`}}\" crictl_version={{user `crictl_version`}} crictl_source_type={{user `crictl_source_type`}} custom_role_names=\"{{user `custom_role_names`}}\" firstboot_custom_roles_pre=\"{{user `firstboot_custom_roles_pre`}}\" firstboot_custom_roles_post=\"{{user `firstboot_custom_roles_post`}}\" node_custom_roles_pre=\"{{user `node_custom_roles_pre`}}\" node_custom_roles_post=\"{{user `node_custom_roles_post`}}\" 
node_custom_roles_post_sysprep=\"{{user `node_custom_roles_post_sysprep`}}\" disable_public_repos={{user `disable_public_repos`}} extra_debs=\"{{user `extra_debs`}}\" extra_repos=\"{{user `extra_repos`}}\" extra_rpms=\"{{user `extra_rpms`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} kubeadm_template={{user `kubeadm_template`}} kubernetes_apiserver_port={{user `kubernetes_apiserver_port`}} kubernetes_cni_http_source={{user `kubernetes_cni_http_source`}} kubernetes_http_source={{user `kubernetes_http_source`}} kubernetes_container_registry={{user `kubernetes_container_registry`}} kubernetes_rpm_repo={{user `kubernetes_rpm_repo`}} kubernetes_rpm_gpg_key={{user `kubernetes_rpm_gpg_key`}} kubernetes_rpm_gpg_check={{user `kubernetes_rpm_gpg_check`}} kubernetes_deb_repo={{user `kubernetes_deb_repo`}} kubernetes_deb_gpg_key={{user `kubernetes_deb_gpg_key`}} kubernetes_cni_deb_version={{user `kubernetes_cni_deb_version`}} kubernetes_cni_rpm_version={{user `kubernetes_cni_rpm_version`}} kubernetes_cni_semver={{user `kubernetes_cni_semver`}} kubernetes_cni_source_type={{user `kubernetes_cni_source_type`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_source_type={{user `kubernetes_source_type`}} kubernetes_load_additional_imgs={{user `kubernetes_load_additional_imgs`}} kubernetes_deb_version={{user `kubernetes_deb_version`}} kubernetes_rpm_version={{user `kubernetes_rpm_version`}} no_proxy={{user `no_proxy`}} pip_conf_file={{user `pip_conf_file`}} python_path={{user `python_path`}} redhat_epel_rpm={{user `redhat_epel_rpm`}} epel_rpm_gpg_key={{user `epel_rpm_gpg_key`}} reenable_public_repos={{user `reenable_public_repos`}} remove_extra_repos={{user `remove_extra_repos`}} systemd_prefix={{user `systemd_prefix`}} sysusr_prefix={{user `sysusr_prefix`}} sysusrlocal_prefix={{user `sysusrlocal_prefix`}} load_additional_components={{ user `load_additional_components`}} additional_registry_images={{ user `additional_registry_images`}} 
additional_registry_images_list={{ user `additional_registry_images_list`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} additional_s3={{ user `additional_s3`}} build_target={{ user `build_target`}} amazon_ssm_agent_rpm={{ user `amazon_ssm_agent_rpm` }} enable_containerd_audit={{ user `enable_containerd_audit` }} kubernetes_enable_automatic_resource_sizing={{ user `kubernetes_enable_automatic_resource_sizing` }} debug_tools={{user `debug_tools`}} ubuntu_repo={{user `ubuntu_repo`}} ubuntu_security_repo={{user `ubuntu_security_repo`}} gpu_block_nouveau_loading={{user `block_nouveau_loading`}} runc_version={{user `runc_version`}} containerd_service_url={{user `containerd_service_url`}} netplan_removal_excludes=\"{{user `netplan_removal_excludes`}}\"", + "ansible_common_vars": "containerd_gvisor_runtime={{user `containerd_gvisor_runtime`}} containerd_gvisor_version={{user `containerd_gvisor_version`}} containerd_sha256={{user `containerd_sha256`}} pause_image={{user `pause_image`}} containerd_additional_settings={{user `containerd_additional_settings`}} containerd_cri_socket={{user `containerd_cri_socket`}} containerd_version={{user `containerd_version`}} containerd_image_pull_progress_timeout={{user `containerd_image_pull_progress_timeout`}} containerd_wasm_shims_url={{user `containerd_wasm_shims_url`}} containerd_wasm_shims_version={{user `containerd_wasm_shims_version`}} containerd_wasm_shims_sha256={{user `containerd_wasm_shims_sha256`}} containerd_wasm_shims_runtimes=\"{{user `containerd_wasm_shims_runtimes`}}\" containerd_wasm_shims_runtime_versions=\"{{user `containerd_wasm_shims_runtime_versions`}}\" 
crictl_version={{user `crictl_version`}} custom_role_names=\"{{user `custom_role_names`}}\" firstboot_custom_roles_pre=\"{{user `firstboot_custom_roles_pre`}}\" firstboot_custom_roles_post=\"{{user `firstboot_custom_roles_post`}}\" node_custom_roles_pre=\"{{user `node_custom_roles_pre`}}\" node_custom_roles_post=\"{{user `node_custom_roles_post`}}\" node_custom_roles_post_sysprep=\"{{user `node_custom_roles_post_sysprep`}}\" disable_public_repos={{user `disable_public_repos`}} extra_debs=\"{{user `extra_debs`}}\" extra_kernel_boot_params=\"{{user `extra_kernel_boot_params`}}\" extra_repos=\"{{user `extra_repos`}}\" extra_rpms=\"{{user `extra_rpms`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} kubeadm_template={{user `kubeadm_template`}} kubernetes_apiserver_port={{user `kubernetes_apiserver_port`}} kubernetes_cni_http_source={{user `kubernetes_cni_http_source`}} kubernetes_http_source={{user `kubernetes_http_source`}} kubernetes_container_registry={{user `kubernetes_container_registry`}} kubernetes_rpm_repo={{user `kubernetes_rpm_repo`}} kubernetes_rpm_gpg_key={{user `kubernetes_rpm_gpg_key`}} kubernetes_rpm_gpg_check={{user `kubernetes_rpm_gpg_check`}} kubernetes_deb_repo={{user `kubernetes_deb_repo`}} kubernetes_deb_gpg_key={{user `kubernetes_deb_gpg_key`}} kubernetes_cni_deb_version={{user `kubernetes_cni_deb_version`}} kubernetes_cni_rpm_version={{user `kubernetes_cni_rpm_version`}} kubernetes_cni_semver={{user `kubernetes_cni_semver`}} kubernetes_cni_source_type={{user `kubernetes_cni_source_type`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_source_type={{user `kubernetes_source_type`}} kubernetes_load_additional_imgs={{user `kubernetes_load_additional_imgs`}} kubernetes_deb_version={{user `kubernetes_deb_version`}} kubernetes_rpm_version={{user `kubernetes_rpm_version`}} no_proxy={{user `no_proxy`}} pip_conf_file={{user `pip_conf_file`}} python_path={{user `python_path`}} redhat_epel_rpm={{user `redhat_epel_rpm`}} 
epel_rpm_gpg_key={{user `epel_rpm_gpg_key`}} reenable_public_repos={{user `reenable_public_repos`}} remove_extra_repos={{user `remove_extra_repos`}} systemd_prefix={{user `systemd_prefix`}} sysusr_prefix={{user `sysusr_prefix`}} sysusrlocal_prefix={{user `sysusrlocal_prefix`}} load_additional_components={{ user `load_additional_components`}} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} additional_s3={{ user `additional_s3`}} build_target={{ user `build_target`}} amazon_ssm_agent_rpm={{ user `amazon_ssm_agent_rpm` }} enable_containerd_audit={{ user `enable_containerd_audit` }} kubernetes_enable_automatic_resource_sizing={{ user `kubernetes_enable_automatic_resource_sizing` }} debug_tools={{user `debug_tools`}} ubuntu_repo={{user `ubuntu_repo`}} ubuntu_security_repo={{user `ubuntu_security_repo`}} gpu_block_nouveau_loading={{user `block_nouveau_loading`}} runc_version={{user `runc_version`}} containerd_service_url={{user `containerd_service_url`}} netplan_removal_excludes=\"{{user `netplan_removal_excludes`}}\"", "ansible_scp_extra_args": "{{env `ANSIBLE_SCP_EXTRA_ARGS`}}" } diff --git a/images/capi/packer/config/common.json b/images/capi/packer/config/common.json index e41c230127..f8224ea962 100644 --- a/images/capi/packer/config/common.json +++ b/images/capi/packer/config/common.json @@ -3,6 +3,7 @@ "debug_tools": "false", "disable_public_repos": "false", "extra_debs": "", + "extra_kernel_boot_params": "", "extra_repos": "", "extra_rpms": "", "firstboot_custom_roles_post": "", 
@@ -14,7 +15,7 @@ "node_custom_roles_post": "", "node_custom_roles_post_sysprep": "", "node_custom_roles_pre": "", - "pause_image": "registry.k8s.io/pause:3.10", + "pause_image": "registry.k8s.io/pause:3.10.2", "pip_conf_file": "", "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm", "reenable_public_repos": "true", diff --git a/images/capi/packer/config/containerd.json b/images/capi/packer/config/containerd.json index a7b52a56f3..08b1d789da 100644 --- a/images/capi/packer/config/containerd.json +++ b/images/capi/packer/config/containerd.json @@ -3,6 +3,7 @@ "containerd_cri_socket": "/var/run/containerd/containerd.sock", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", - "containerd_version": "1.7.25", - "runc_version": "1.2.3" + "containerd_image_pull_progress_timeout": null, + "containerd_version": "2.2.2", + "runc_version": "1.3.4" } diff --git a/images/capi/packer/config/kubernetes.json b/images/capi/packer/config/kubernetes.json index 6e0deb7196..886ac748ea 100644 --- a/images/capi/packer/config/kubernetes.json +++ b/images/capi/packer/config/kubernetes.json @@ -1,21 +1,20 @@ { - "crictl_source_type": "pkg", - "crictl_version": "1.32.0", + "crictl_version": "1.34.0", "kubeadm_template": "etc/kubeadm.yml", "kubernetes_apiserver_port": "6443", "kubernetes_container_registry": "registry.k8s.io", "kubernetes_deb_gpg_key": "https://pkgs.k8s.io/core:/stable:/{{ user `kubernetes_series` }}/deb/Release.key", "kubernetes_deb_repo": "https://pkgs.k8s.io/core:/stable:/{{ user `kubernetes_series` }}/deb/", - "kubernetes_deb_version": "1.32.4-1.1", + "kubernetes_deb_version": "1.34.3-1.1", "kubernetes_http_source": "https://dl.k8s.io/release", "kubernetes_load_additional_imgs": "false", "kubernetes_rpm_gpg_check": "True", "kubernetes_rpm_gpg_key": "https://pkgs.k8s.io/core:/stable:/{{ user `kubernetes_series` }}/rpm/repodata/repomd.xml.key", "kubernetes_rpm_repo": "https://pkgs.k8s.io/core:/stable:/{{ user 
`kubernetes_series` }}/rpm/", "kubernetes_rpm_repo_arch": "x86_64", - "kubernetes_rpm_version": "1.32.4", - "kubernetes_semver": "v1.32.4", - "kubernetes_series": "v1.32", + "kubernetes_rpm_version": "1.34.3", + "kubernetes_semver": "v1.34.3", + "kubernetes_series": "v1.34", "kubernetes_source_type": "pkg", "systemd_prefix": "/usr/lib/systemd", "sysusr_prefix": "/usr", diff --git a/images/capi/packer/config/ppc64le/containerd.json b/images/capi/packer/config/ppc64le/containerd.json index b7e2aca97b..78d4944db8 100644 --- a/images/capi/packer/config/ppc64le/containerd.json +++ b/images/capi/packer/config/ppc64le/containerd.json @@ -1,4 +1,5 @@ { - "containerd_sha256": "b2d4e44946e55a10835a327cbd98c0c2063011bbdebb95ef8c5e5677312f1d29", - "containerd_version": "1.7.25" + "containerd_image_pull_progress_timeout": null, + "containerd_sha256": "8f7a8190f2a635cd0e5580a131408a275ba277f7a04edffba4a4005960093987", + "containerd_version": "2.2.2" } diff --git a/images/capi/packer/config/windows/ansible-args-windows.json b/images/capi/packer/config/windows/ansible-args-windows.json index 02120012fe..f9a3231092 100644 --- a/images/capi/packer/config/windows/ansible-args-windows.json +++ b/images/capi/packer/config/windows/ansible-args-windows.json @@ -1,3 +1,3 @@ { - "ansible_common_vars": "runtime={{user `runtime`}} containerd_url={{user `containerd_url`}} containerd_sha256={{user `containerd_sha256_windows`}} containerd_version={{user `containerd_version`}} pause_image={{user `pause_image`}} additional_debug_files=\"{{user `additional_debug_files`}}\" containerd_additional_settings={{user `containerd_additional_settings`}} custom_role_names=\"{{user `custom_role_names`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} no_proxy={{user `no_proxy`}} kubernetes_base_url={{user `kubernetes_base_url`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_install_path={{user `kubernetes_install_path`}} cloudbase_init_url=\"{{user `cloudbase_init_url`}}\" 
cloudbase_plugins=\"{{user `cloudbase_plugins`}}\" cloudbase_metadata_services=\"{{user `cloudbase_metadata_services`}}\" cloudbase_plugins_unattend=\"{{user `cloudbase_plugins_unattend`}}\" cloudbase_metadata_services_unattend=\"{{user `cloudbase_metadata_services_unattend`}}\" prepull={{user `prepull`}} windows_updates_kbs=\"{{user `windows_updates_kbs`}}\" windows_updates_categories=\"{{user `windows_updates_categories`}}\" windows_service_manager={{user `windows_service_manager`}} nssm_url={{user `nssm_url`}} distribution_version={{user `distribution_version`}} netbios_host_name_compatibility={{user `netbios_host_name_compatibility`}} disable_hypervisor={{ user `disable_hypervisor` }} cloudbase_logging_serial_port={{ user `cloudbase_logging_serial_port` }} cloudbase_real_time_clock_utc={{ user `cloudbase_real_time_clock_utc` }} load_additional_components={{ user `load_additional_components`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} ssh_source_url={{user `ssh_source_url` }} debug_tools={{user `debug_tools`}}" + "ansible_common_vars": "runtime={{user `runtime`}} containerd_url={{user `containerd_url`}} containerd_sha256={{user `containerd_sha256_windows`}} containerd_version={{user `containerd_version`}} containerd_image_pull_progress_timeout={{user `containerd_image_pull_progress_timeout`}} pause_image={{user `pause_image`}} additional_debug_files=\"{{user `additional_debug_files`}}\" containerd_additional_settings={{user `containerd_additional_settings`}} 
custom_role_names=\"{{user `custom_role_names`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} no_proxy={{user `no_proxy`}} kubernetes_base_url={{user `kubernetes_base_url`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_install_path={{user `kubernetes_install_path`}} cloudbase_init_url=\"{{user `cloudbase_init_url`}}\" cloudbase_plugins=\"{{user `cloudbase_plugins`}}\" cloudbase_metadata_services=\"{{user `cloudbase_metadata_services`}}\" cloudbase_plugins_unattend=\"{{user `cloudbase_plugins_unattend`}}\" cloudbase_metadata_services_unattend=\"{{user `cloudbase_metadata_services_unattend`}}\" prepull={{user `prepull`}} windows_updates_kbs=\"{{user `windows_updates_kbs`}}\" windows_updates_categories=\"{{user `windows_updates_categories`}}\" windows_service_manager={{user `windows_service_manager`}} nssm_url={{user `nssm_url`}} distribution_version={{user `distribution_version`}} netbios_host_name_compatibility={{user `netbios_host_name_compatibility`}} disable_hypervisor={{ user `disable_hypervisor` }} cloudbase_logging_serial_port={{ user `cloudbase_logging_serial_port` }} cloudbase_real_time_clock_utc={{ user `cloudbase_real_time_clock_utc` }} load_additional_components={{ user `load_additional_components`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} ssh_source_url={{user `ssh_source_url` }} debug_tools={{user `debug_tools`}}" } diff --git a/images/capi/packer/config/windows/containerd.json 
b/images/capi/packer/config/windows/containerd.json index ad43316008..7120a37b18 100644 --- a/images/capi/packer/config/windows/containerd.json +++ b/images/capi/packer/config/windows/containerd.json @@ -1,4 +1,5 @@ { "containerd_additional_settings": null, + "containerd_image_pull_progress_timeout": null, "containerd_url": "https://github.com/containerd/containerd/releases/download/v{{user `containerd_version`}}/containerd-{{user `containerd_version`}}-windows-amd64.tar.gz" } diff --git a/images/capi/packer/digitalocean/packer.json b/images/capi/packer/digitalocean/packer.json index 73e4051d25..35750d72ea 100644 --- a/images/capi/packer/digitalocean/packer.json +++ b/images/capi/packer/digitalocean/packer.json @@ -45,6 +45,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/files/flatcar/clc/bootstrap-cloud.yaml b/images/capi/packer/files/flatcar/clc/bootstrap-cloud.yaml index 6f04aa040e..8594349eb3 100644 --- a/images/capi/packer/files/flatcar/clc/bootstrap-cloud.yaml +++ b/images/capi/packer/files/flatcar/clc/bootstrap-cloud.yaml @@ -2,10 +2,12 @@ # authorize SSH keys (typically cloud providers such as AWS or Azure). On such platforms, no SSH # configuration needs to be done via Ignition. The actions in this file are performed before Packer # provisioners (e.g. Ansible) are executed. +variant: flatcar +version: 1.1.0 systemd: units: - name: docker.service - enable: true + enabled: true # Mask update-engine and locksmithd to disable automatic updates during image creation. 
- name: update-engine.service mask: true diff --git a/images/capi/packer/files/flatcar/clc/bootstrap-pass-auth.yaml b/images/capi/packer/files/flatcar/clc/bootstrap-pass-auth.yaml index 13b364452d..3aef9d32f3 100644 --- a/images/capi/packer/files/flatcar/clc/bootstrap-pass-auth.yaml +++ b/images/capi/packer/files/flatcar/clc/bootstrap-pass-auth.yaml @@ -1,6 +1,8 @@ # This file is used for initial provisioning of a Flatcar machine on platforms which use SSH # password authentication during the build process. The actions in this file are performed before # Packer provisioners (e.g. Ansible) are executed. +variant: flatcar +version: 1.1.0 passwd: users: - name: builder @@ -13,7 +15,7 @@ passwd: systemd: units: - name: docker.service - enable: true + enabled: true # Mask update-engine and locksmithd to disable automatic updates during image creation. - name: update-engine.service mask: true diff --git a/images/capi/packer/files/flatcar/ignition/bootstrap-cloud.json b/images/capi/packer/files/flatcar/ignition/bootstrap-cloud.json index 58ee4eef34..8a937079d8 100644 --- a/images/capi/packer/files/flatcar/ignition/bootstrap-cloud.json +++ b/images/capi/packer/files/flatcar/ignition/bootstrap-cloud.json @@ -1,19 +1,11 @@ { "ignition": { - "config": {}, - "security": { - "tls": {} - }, - "timeouts": {}, - "version": "2.3.0" + "version": "3.4.0" }, - "networkd": {}, - "passwd": {}, - "storage": {}, "systemd": { "units": [ { - "enable": true, + "enabled": true, "name": "docker.service" }, { diff --git a/images/capi/packer/files/flatcar/ignition/bootstrap-pass-auth.json b/images/capi/packer/files/flatcar/ignition/bootstrap-pass-auth.json index 64b5030d89..418b6187e4 100644 --- a/images/capi/packer/files/flatcar/ignition/bootstrap-pass-auth.json +++ b/images/capi/packer/files/flatcar/ignition/bootstrap-pass-auth.json @@ -1,13 +1,7 @@ { "ignition": { - "config": {}, - "security": { - "tls": {} - }, - "timeouts": {}, - "version": "2.3.0" + "version": "3.4.0" }, - "networkd": {}, 
"passwd": { "users": [ { @@ -21,11 +15,10 @@ } ] }, - "storage": {}, "systemd": { "units": [ { - "enable": true, + "enabled": true, "name": "docker.service" }, { diff --git a/images/capi/packer/gce/ci/nightly/overwrite-1-34.json b/images/capi/packer/gce/ci/nightly/overwrite-1-34.json new file mode 100644 index 0000000000..8a36fb79e5 --- /dev/null +++ b/images/capi/packer/gce/ci/nightly/overwrite-1-34.json @@ -0,0 +1,8 @@ +{ + "build_timestamp": "nightly", + "kubernetes_deb_version": "1.34.3-1.1", + "kubernetes_rpm_version": "1.34.3", + "kubernetes_semver": "v1.34.3", + "kubernetes_series": "v1.34", + "service_account_email": "gcb-builder-cluster-api-gcp@k8s-staging-cluster-api-gcp.iam.gserviceaccount.com" +} diff --git a/images/capi/packer/gce/ci/nightly/overwrite-1-35.json b/images/capi/packer/gce/ci/nightly/overwrite-1-35.json new file mode 100644 index 0000000000..801e4116f1 --- /dev/null +++ b/images/capi/packer/gce/ci/nightly/overwrite-1-35.json @@ -0,0 +1,8 @@ +{ + "build_timestamp": "nightly", + "kubernetes_deb_version": "1.35.0-1.1", + "kubernetes_rpm_version": "1.35.0", + "kubernetes_semver": "v1.35.0", + "kubernetes_series": "v1.35", + "service_account_email": "gcb-builder-cluster-api-gcp@k8s-staging-cluster-api-gcp.iam.gserviceaccount.com" +} diff --git a/images/capi/packer/gce/packer.json b/images/capi/packer/gce/packer.json index 341f019f27..498e57e580 100644 --- a/images/capi/packer/gce/packer.json +++ b/images/capi/packer/gce/packer.json @@ -19,6 +19,7 @@ "service_account_email": "{{ user `service_account_email` }}", "source_image_family": "{{ user `source_image_family` }}", "ssh_username": "{{user `ssh_username`}}", + "tags": "{{ user `tags` }}", "type": "googlecompute", "use_internal_ip": "{{ user `use_internal_ip`}}", "zone": "{{ user `zone` }}" @@ -66,6 +67,7 @@ "PROVIDER": "gcp", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + 
"containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -86,6 +88,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/gce/rhel-8.json b/images/capi/packer/gce/rhel-8.json deleted file mode 100644 index d328a66235..0000000000 --- a/images/capi/packer/gce/rhel-8.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "build_name": "rhel-8", - "distribution": "rhel", - "distribution_release": "rhel-8", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "source_image_family": "rhel-8", - "ssh_username": "packer", - "zone": "us-central1-a" -} diff --git a/images/capi/packer/goss/goss-command.yaml b/images/capi/packer/goss/goss-command.yaml index 1fa9971982..d82a96449d 100644 --- a/images/capi/packer/goss/goss-command.yaml +++ b/images/capi/packer/goss/goss-command.yaml @@ -1,8 +1,9 @@ command: {{ if ne .Vars.OS "windows" }} # Linux Only - containerd --version | awk -F' ' '{print substr($3,2); }': + PATH=/opt/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin containerd --version | awk -F' ' '{print substr($3,2); }': exit-status: 0 - stdout: [] + stdout: + - "{{.Vars.containerd_version}}" stderr: [] timeout: 0 crictl ps: @@ -10,27 +11,42 @@ command: stdout: [] stderr: [] timeout: 0 -{{if ne 
.Vars.containerd_wasm_shims_runtimes ""}} +{{if and (semverCompare ">=2.0.0" .Vars.containerd_version) (ne .Vars.containerd_image_pull_progress_timeout "")}} + grep -E 'image_pull_progress_timeout = "{{.Vars.containerd_image_pull_progress_timeout}}"' /etc/containerd/config.toml: + exit-status: 0 + stdout: [ ] + stderr: [ ] + timeout: 0 +{{end}} +{{if contains "lunatic" .Vars.containerd_wasm_shims_runtimes}} containerd-shim-lunatic-v1: exit-status: 1 stdout: [ ] stderr: ["io.containerd.lunatic.v1: InvalidArgument(\"Shim namespace cannot be empty\")"] timeout: 0 +{{end}} +{{if contains "slight" .Vars.containerd_wasm_shims_runtimes}} containerd-shim-slight-v1: exit-status: 1 stdout: [ ] stderr: ["io.containerd.slight.v1: InvalidArgument(\"Shim namespace cannot be empty\")"] timeout: 0 +{{end}} +{{if contains "spin" .Vars.containerd_wasm_shims_runtimes}} containerd-shim-spin-v2: exit-status: 1 stdout: [ ] stderr: ["io.containerd.spin.v2: InvalidArgument(\"Shim namespace cannot be empty\")"] timeout: 0 +{{end}} +{{if contains "wws" .Vars.containerd_wasm_shims_runtimes}} containerd-shim-wws-v1: exit-status: 1 stdout: [ ] stderr: ["io.containerd.wws.v1: InvalidArgument(\"Shim namespace cannot be empty\")"] timeout: 0 +{{end}} +{{if ne .Vars.containerd_wasm_shims_runtimes ""}} grep -E 'io\.containerd\.(lunatic|slight|spin|wws)\.v' /etc/containerd/config.toml: exit-status: 0 stdout: [ ] @@ -190,25 +206,73 @@ command: - "{{.Vars.containerd_version}}" timeout: 30000 {{ if (semverCompare ">=2.0.0" .Vars.containerd_version) }} - Correct Containerd config: - exec: "\"/Program Files/containerd/containerd.exe\" config dump" + Correct Containerd sandbox config: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'sandbox = \\\"{{.Vars.pause_image}}\\\"'\"" exit-status: 0 stdout: - "sandbox = \"{{.Vars.pause_image}}\"" + timeout: 30000 + {{ if ne .Vars.containerd_image_pull_progress_timeout "" }} + Correct Containerd 
image pull timeout config: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'image_pull_progress_timeout = \\\"{{.Vars.containerd_image_pull_progress_timeout}}\\\"'\"" + exit-status: 0 + stdout: + - "image_pull_progress_timeout = \"{{.Vars.containerd_image_pull_progress_timeout}}\"" + timeout: 30000 + {{ end }} + Correct Containerd CNI conf_dir: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'conf_dir = \\\"C:/etc/cni/net.d\\\"'\"" + exit-status: 0 + stdout: - "conf_dir = \"C:/etc/cni/net.d\"" + timeout: 30000 + Correct Containerd CNI bin_dir: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'bin_dir = \\\"C:/opt/cni/bin\\\"'\"" + exit-status: 0 + stdout: - "bin_dir = \"C:/opt/cni/bin\"" + timeout: 30000 + Correct Containerd root: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'root = \\\"C:\\\\ProgramData\\\\containerd\\\\root\\\"'\"" + exit-status: 0 + stdout: - "root = \"C:\\\\ProgramData\\\\containerd\\\\root\"" + timeout: 30000 + Correct Containerd state: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'state = \\\"C:\\\\ProgramData\\\\containerd\\\\state\\\"'\"" + exit-status: 0 + stdout: - "state = \"C:\\\\ProgramData\\\\containerd\\\\state\"" timeout: 30000 {{ else }} - Correct Containerd config: - exec: "\"/Program Files/containerd/containerd.exe\" config dump" + Correct Containerd sandbox config: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'sandbox_image = \\\"{{.Vars.pause_image}}\\\"'\"" exit-status: 0 stdout: - "sandbox_image = \"{{.Vars.pause_image}}\"" + timeout: 30000 + Correct Containerd CNI conf_dir: + exec: "powershell -command 
\"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'conf_dir = \\\"C:/etc/cni/net.d\\\"'\"" + exit-status: 0 + stdout: - "conf_dir = \"C:/etc/cni/net.d\"" + timeout: 30000 + Correct Containerd CNI bin_dir: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'bin_dir = \\\"C:/opt/cni/bin\\\"'\"" + exit-status: 0 + stdout: - "bin_dir = \"C:/opt/cni/bin\"" + timeout: 30000 + Correct Containerd root: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'root = \\\"C:\\\\ProgramData\\\\containerd\\\\root\\\"'\"" + exit-status: 0 + stdout: - "root = \"C:\\\\ProgramData\\\\containerd\\\\root\"" + timeout: 30000 + Correct Containerd state: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'state = \\\"C:\\\\ProgramData\\\\containerd\\\\state\\\"'\"" + exit-status: 0 + stdout: - "state = \"C:\\\\ProgramData\\\\containerd\\\\state\"" timeout: 30000 {{ end }} diff --git a/images/capi/packer/goss/goss-files.yaml b/images/capi/packer/goss/goss-files.yaml index 2130871688..d38a634989 100644 --- a/images/capi/packer/goss/goss-files.yaml +++ b/images/capi/packer/goss/goss-files.yaml @@ -25,3 +25,10 @@ file: {{end}} {{end}} {{end}} +{{if .Vars.extra_kernel_boot_params }} + "/boot/grub/grub.cfg": + exists: true + filetype: file + contains: + - {{ .Vars.extra_kernel_boot_params }} +{{end}} diff --git a/images/capi/packer/goss/goss-vars.yaml b/images/capi/packer/goss/goss-vars.yaml index 08c9bc48d3..d0272ee97a 100644 --- a/images/capi/packer/goss/goss-vars.yaml +++ b/images/capi/packer/goss/goss-vars.yaml @@ -91,8 +91,10 @@ photon_5_rpms: &photon_5_rpms arch: "amd64" containerd_gvisor_runtime: "" containerd_gvisor_version: "" +containerd_image_pull_progress_timeout: "" containerd_version: "" containerd_wasm_shims_runtimes: "" +extra_kernel_boot_params: "" 
kubernetes_cni_source_type: "" kubernetes_cni_version: "" kubernetes_source_type: "" @@ -170,6 +172,18 @@ centos: cloud-init: cloud-utils-growpart: python2-pip: +almalinux: + common-package: *common_rpms + ova: + package: + open-vm-tools: + os_version: + - distro_version: "8" + package: + <<: *rh8_rpms + - distro_version: "9" + package: + <<: *rh9_rpms flatcar: common-service: systemd-timesyncd: @@ -189,6 +203,8 @@ flatcar: command: hcloud: command: + openstack: + command: photon: common-service: apparmor: diff --git a/images/capi/packer/hcloud/flatcar-arm64.json b/images/capi/packer/hcloud/flatcar-arm64.json index dffddaeaf4..558488a881 100644 --- a/images/capi/packer/hcloud/flatcar-arm64.json +++ b/images/capi/packer/hcloud/flatcar-arm64.json @@ -3,7 +3,6 @@ "arch": "arm64", "build_name": "flatcar", "crictl_arch": "arm64", - "crictl_source_type": "http", "distribution": "flatcar", "distribution_release": "{{env `FLATCAR_CHANNEL`}}", "distribution_version": "{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", diff --git a/images/capi/packer/hcloud/flatcar.json b/images/capi/packer/hcloud/flatcar.json index f7ea8824af..70ca728d8f 100644 --- a/images/capi/packer/hcloud/flatcar.json +++ b/images/capi/packer/hcloud/flatcar.json @@ -2,7 +2,6 @@ "ansible_extra_vars": "ansible_python_interpreter=/opt/bin/python", "arch": "amd64", "build_name": "flatcar", - "crictl_source_type": "http", "distribution": "flatcar", "distribution_release": "{{env `FLATCAR_CHANNEL`}}", "distribution_version": "{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", diff --git a/images/capi/packer/hcloud/packer-flatcar.json b/images/capi/packer/hcloud/packer-flatcar.json index de227d846a..1430cfcdbd 100644 --- a/images/capi/packer/hcloud/packer-flatcar.json +++ b/images/capi/packer/hcloud/packer-flatcar.json @@ -29,6 +29,7 @@ "build_name": "{{user `build_name`}}", "build_timestamp": "{{user `build_timestamp`}}", "build_type": "node", + "containerd_image_pull_progress_timeout": "{{user 
`containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_semver": "{{user `kubernetes_cni_semver`}}", "kubernetes_semver": "{{user `kubernetes_semver`}}", @@ -137,6 +138,7 @@ "ansible_user_vars": "", "build_name": null, "build_timestamp": "{{timestamp}}", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "containerd_wasm_shims_runtimes": null, diff --git a/images/capi/packer/hcloud/packer.json b/images/capi/packer/hcloud/packer.json index 3cc8d51dd2..19308d02c5 100644 --- a/images/capi/packer/hcloud/packer.json +++ b/images/capi/packer/hcloud/packer.json @@ -104,6 +104,7 @@ "PROVIDER": "hcloud", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "containerd_wasm_shims_runtimes": "{{user `containerd_wasm_shims_runtimes` }}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", @@ -127,6 +128,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "containerd_wasm_shims_runtimes": null, diff --git a/images/capi/packer/hcloud/rockylinux-8.json b/images/capi/packer/hcloud/rockylinux-8.json deleted file mode 100644 index 720c62b16c..0000000000 --- a/images/capi/packer/hcloud/rockylinux-8.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "build_name": "rockylinux-8", - "distribution": "rockylinux", - 
"distribution_release": "Core", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "image": "rocky-8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "ssh_username": "root" -} diff --git a/images/capi/packer/huaweicloud/packer.json b/images/capi/packer/huaweicloud/packer.json index c12ff3ebe9..abe9129f04 100644 --- a/images/capi/packer/huaweicloud/packer.json +++ b/images/capi/packer/huaweicloud/packer.json @@ -101,6 +101,7 @@ "PROVIDER": "huaweicloud", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -125,6 +126,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/maas/maas-ubuntu-2404-efi.json b/images/capi/packer/maas/maas-ubuntu-2404-efi.json index de66ad7ec9..c57b05fecc 100644 --- a/images/capi/packer/maas/maas-ubuntu-2404-efi.json +++ b/images/capi/packer/maas/maas-ubuntu-2404-efi.json @@ -5,9 +5,9 @@ "distro_name": "ubuntu", "firmware": "OVMF.fd", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": 
"https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" diff --git a/images/capi/packer/maas/packer.json.tmpl b/images/capi/packer/maas/packer.json.tmpl index 9c7b1cebb1..48734e1bb5 100644 --- a/images/capi/packer/maas/packer.json.tmpl +++ b/images/capi/packer/maas/packer.json.tmpl @@ -114,6 +114,13 @@ "type": "ansible", "user": "builder" }, + { + "inline": [ + "sudo rm -f /etc/fstab" + ], + "inline_shebang": "/bin/bash -e", + "type": "shell" + }, { "arch": "{{user `goss_arch`}}", "format": "{{user `goss_format`}}", @@ -132,6 +139,7 @@ "OS": "{{user `distro_name` | lower}}", "OS_VERSION": "{{user `distribution_version` | lower}}", "PROVIDER": "qemu", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -158,9 +166,10 @@ "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_url": "https://github.com/containerd/containerd/releases/download/v{{user `containerd_version`}}/containerd-{{user `containerd_version`}}-linux-amd64.tar.gz", "containerd_version": null, + "containerd_image_pull_progress_timeout": null, "cpus": "1", "crictl_version": null, - "crictl_url": "https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{user `crictl_version`}}/crictl-v{{user `crictl_version`}}-linux-amd64.tar.gz", + "crictl_url": "https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{user `crictl_version`}}/crictl-v{{user `crictl_version`}}-linux-amd64.tar.gz", "disk_compression": "false", 
"disk_discard": "unmap", "disk_image": "false", @@ -188,7 +197,7 @@ "kubernetes_series": null, "kubernetes_source_type": null, "runc_url": "https://github.com/opencontainers/runc/releases/download/v{{user `runc_version`}}/runc.amd64", - "runc_version": null, + "runc_version": null, "machine_id_mode": "444", "memory": "2048", "oem_id": "", diff --git a/images/capi/packer/nutanix/flatcar.json b/images/capi/packer/nutanix/flatcar.json index 0f57bd1a43..2a6075a0c7 100644 --- a/images/capi/packer/nutanix/flatcar.json +++ b/images/capi/packer/nutanix/flatcar.json @@ -2,7 +2,6 @@ "ansible_extra_vars": "ansible_python_interpreter=/opt/bin/python3", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distribution": "flatcar", "distribution_release": "Core", "distribution_version": "{{env `FLATCAR_CHANNEL`}}", diff --git a/images/capi/packer/nutanix/packer-windows.json b/images/capi/packer/nutanix/packer-windows.json index 88ec188636..322481d096 100644 --- a/images/capi/packer/nutanix/packer-windows.json +++ b/images/capi/packer/nutanix/packer-windows.json @@ -106,6 +106,7 @@ "vars_inline": { "OS": "{{user `distro_name` | lower}}", "PROVIDER": "nutanix", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "distribution_version": "{{user `distro_version`}}", "kubernetes_version": "{{user `kubernetes_semver`}}", @@ -142,7 +143,7 @@ "image_delete": "false", "image_export": "false", "image_name": "{{user `build_name`}}-kube-{{user `kubernetes_semver`}}", - "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user `kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", 
"kubernetes_container_registry": null, "kubernetes_http_package_url": "", "kubernetes_http_source": null, @@ -161,7 +162,7 @@ "nutanix_port": "{{env `NUTANIX_PORT`}}", "nutanix_subnet_name": "{{env `NUTANIX_SUBNET_NAME`}}", "nutanix_username": "{{env `NUTANIX_USERNAME`}}", - "scp_extra_vars": "", + "scp_extra_vars": "{{user `ansible_scp_extra_args`}}", "vm_force_delete": "false", "windows_admin_password": "{{env `WINDOWS_ADMIN_PASSWORD`}}" } diff --git a/images/capi/packer/nutanix/packer.json.tmpl b/images/capi/packer/nutanix/packer.json.tmpl index 67e0c07e1d..b71042195b 100644 --- a/images/capi/packer/nutanix/packer.json.tmpl +++ b/images/capi/packer/nutanix/packer.json.tmpl @@ -97,6 +97,7 @@ "PROVIDER": "nutanix", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -118,6 +119,7 @@ "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", "containerd_version": null, + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "runc_version": null, "cpus": "1", @@ -155,7 +157,7 @@ "nutanix_subnet_name": "{{env `NUTANIX_SUBNET_NAME`}}", "nutanix_username": "{{env `NUTANIX_USERNAME`}}", "python_path": "", - "scp_extra_vars": "", + "scp_extra_vars": "{{user `ansible_scp_extra_args`}}", "source_image_delete": "false", "source_image_force": "false", "ssh_password": "$SSH_PASSWORD", diff --git a/images/capi/packer/nutanix/rhel-8.json b/images/capi/packer/nutanix/rhel-8.json deleted file mode 100644 index 
9aba21d66a..0000000000 --- a/images/capi/packer/nutanix/rhel-8.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "build_name": "rhel-8", - "distribution_version": "8", - "distro_name": "rhel", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "extra_rpms": "", - "guest_os_type": "Linux", - "image_url": "https://REPLACE_YOUR_SERVER/redhat/8/rhel-8.8-x86_64-kvm.qcow2", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now", - "user_data": "I2Nsb3VkLWNvbmZpZwp1c2VyczoKICAtIG5hbWU6IGJ1aWxkZXIKICAgIHN1ZG86IFsnQUxMPShBTEwpIE5PUEFTU1dEOkFMTCddCmNocGFzc3dkOgogIGxpc3Q6IHwKICAgIGJ1aWxkZXI6YnVpbGRlcgogIGV4cGlyZTogRmFsc2UKc3NoX3B3YXV0aDogVHJ1ZQ==" -} diff --git a/images/capi/packer/nutanix/rockylinux-8.json b/images/capi/packer/nutanix/rockylinux-8.json deleted file mode 100644 index c40ce5e8ab..0000000000 --- a/images/capi/packer/nutanix/rockylinux-8.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "boot_type": "uefi", - "build_name": "rockylinux-8", - "distribution": "rockylinux", - "distribution_release": "Core", - "distribution_version": "8", - "distro_name": "rockylinux", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "extra_rpms": "python3", - "guest_os_type": "Linux", - "image_url": "https://dl.rockylinux.org/pub/rocky/8/images/x86_64/Rocky-8-GenericCloud-Base.latest.x86_64.qcow2", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now", - "user_data": "I2Nsb3VkLWNvbmZpZwp1c2VyczoKICAtIG5hbWU6IGJ1aWxkZXIKICAgIHN1ZG86IFsnQUxMPShBTEwpIE5PUEFTU1dEOkFMTCddCmNocGFzc3dkOgogIGxpc3Q6IHwKICAgIGJ1aWxkZXI6YnVpbGRlcgogIGV4cGlyZTogRmFsc2UKc3NoX3B3YXV0aDogVHJ1ZQ==" -} diff --git a/images/capi/packer/oci/oracle-linux-8.json b/images/capi/packer/oci/oracle-linux-8.json deleted file mode 100644 index 3949ed1c51..0000000000 --- a/images/capi/packer/oci/oracle-linux-8.json +++ 
/dev/null @@ -1,8 +0,0 @@ -{ - "build_name": "oracle-linux-8", - "distribution": "Oracle Linux", - "operating_system": "Oracle Linux", - "operating_system_version": "8", - "redhat_epel_rpm": "oracle-epel-release-el8", - "ssh_username": "opc" -} diff --git a/images/capi/packer/oci/packer-windows.json b/images/capi/packer/oci/packer-windows.json index 4eadb67cde..fb395cb16b 100644 --- a/images/capi/packer/oci/packer-windows.json +++ b/images/capi/packer/oci/packer-windows.json @@ -35,6 +35,7 @@ "build_name": "{{user `build_name`}}", "build_timestamp": "{{user `build_timestamp`}}", "build_type": "node", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_semver": "{{user `kubernetes_cni_semver`}}", "kubernetes_semver": "{{user `kubernetes_semver`}}", @@ -123,11 +124,12 @@ "cloudbase_metadata_services_unattend": "cloudbaseinit.metadata.services.httpservice.HttpService", "cloudbase_plugins": "cloudbaseinit.plugins.common.userdata.UserDataPlugin, cloudbaseinit.plugins.common.ephemeraldisk.EphemeralDiskPlugin, cloudbaseinit.plugins.common.mtu.MTUPlugin, cloudbaseinit.plugins.common.sethostname.SetHostNamePlugin", "cloudbase_plugins_unattend": "cloudbaseinit.plugins.common.mtu.MTUPlugin", + "containerd_image_pull_progress_timeout": null, "containerd_url": "", "containerd_version": null, "ib_version": "{{env `IB_VERSION`}}", "image_version": "latest", - "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user `kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", "manifest_output": "manifest.json", "nssm_url": null, "ocpus": "2", diff --git a/images/capi/packer/oci/packer.json b/images/capi/packer/oci/packer.json index 638d4e8edd..73fdcc0750 100644 --- 
a/images/capi/packer/oci/packer.json +++ b/images/capi/packer/oci/packer.json @@ -1,6 +1,8 @@ { "builders": [ { + "access_cfg_file": "{{user `access_cfg_file`}}", + "access_cfg_file_account": "{{user `access_cfg_file_account`}}", "availability_domain": "{{user `availability_domain`}}", "base_image_filter": { "operating_system": "{{user `operating_system`}}", @@ -76,6 +78,7 @@ "PROVIDER": "oci", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -99,6 +102,7 @@ "compartment_ocid": "", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/openstack/flatcar.json b/images/capi/packer/openstack/flatcar.json index 2c63585d1b..5c984db8d0 100644 --- a/images/capi/packer/openstack/flatcar.json +++ b/images/capi/packer/openstack/flatcar.json @@ -1,7 +1,6 @@ { "ansible_extra_vars": "ansible_python_interpreter=/opt/bin/python", "build_name": "flatcar", - "crictl_source_type": "http", "distro_name": "flatcar", "kubernetes_cni_source_type": "http", "kubernetes_source_type": "http", diff --git a/images/capi/packer/openstack/packer.json b/images/capi/packer/openstack/packer.json index 44ed7bbfc6..04a7bb60f1 100644 --- a/images/capi/packer/openstack/packer.json +++ b/images/capi/packer/openstack/packer.json @@ -95,6 +95,7 @@ "PROVIDER": "openstack", "containerd_gvisor_runtime": "{{user 
`containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -116,6 +117,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/outscale/packer.json b/images/capi/packer/outscale/packer.json index ad4bd5e740..1343cc198a 100644 --- a/images/capi/packer/outscale/packer.json +++ b/images/capi/packer/outscale/packer.json @@ -78,6 +78,7 @@ "PROVIDER": "outscale", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -99,6 +100,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/outscale/ubuntu-2204.json b/images/capi/packer/outscale/ubuntu-2204.json 
index 7e67c1df3e..ac74b43b25 100644 --- a/images/capi/packer/outscale/ubuntu-2204.json +++ b/images/capi/packer/outscale/ubuntu-2204.json @@ -3,5 +3,5 @@ "distribution": "ubuntu", "distribution_release": "ubuntu", "distribution_version": "2204", - "image_name": "Ubuntu-22.04-2023.12.04-0" + "image_name": "Ubuntu-22.04-2026-01-12" } diff --git a/images/capi/packer/outscale/ubuntu-2404.json b/images/capi/packer/outscale/ubuntu-2404.json index f5ce2f1a4c..43cd4337cd 100644 --- a/images/capi/packer/outscale/ubuntu-2404.json +++ b/images/capi/packer/outscale/ubuntu-2404.json @@ -3,5 +3,5 @@ "distribution": "ubuntu", "distribution_release": "ubuntu", "distribution_version": "2404", - "image_name": "Ubuntu-24.04-2025-07-07" + "image_name": "Ubuntu-24.04-2026-01-12" } diff --git a/images/capi/packer/ova/almalinux-9.json b/images/capi/packer/ova/almalinux-9.json new file mode 100644 index 0000000000..4787a8765c --- /dev/null +++ b/images/capi/packer/ova/almalinux-9.json @@ -0,0 +1,21 @@ +{ + "boot_command_prefix": " inst.stage2=hd:LABEL=AlmaLinux-9-4-x86_64-dvd inst.repo=cdrom inst.text inst.ks=cdrom:/ks.cfg rd.multipath=0 rd.live.check", + "boot_command_suffix": "", + "build_name": "almalinux-9", + "cd_content_location": "./packer/ova/linux/{{user `distro_name`}}/http/{{user `distro_version`}}/*", + "cd_label": "cidata", + "distro_arch": "amd64", + "distro_name": "almalinux", + "distro_version": "9", + "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-9", + "firmware": "bios", + "guest_os_type": "rhel9-64", + "iso_checksum": "34c4285d524605da6dbd76b0b475338f6ea0a28bb88929bf14b04db68f1e1620", + "iso_checksum_type": "sha256", + "iso_url": "https://repo.almalinux.org/vault/9.4/isos/x86_64/AlmaLinux-9.4-x86_64-dvd.iso", + "os_display_name": "AlmaLinux 9", + "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm", + "shutdown_command": "/sbin/halt -h -p", + "vmx_version": "18", + "vsphere_guest_os_type": 
"rhel9_64Guest" +} diff --git a/images/capi/packer/ova/config.pkr.hcl b/images/capi/packer/ova/config.pkr.hcl index 2595b89e71..d1b4dd6c35 100644 --- a/images/capi/packer/ova/config.pkr.hcl +++ b/images/capi/packer/ova/config.pkr.hcl @@ -3,7 +3,7 @@ packer { required_plugins { vsphere = { version = ">= 1.4.2" - source = "github.com/hashicorp/vsphere" + source = "github.com/vmware/vsphere" } } } diff --git a/images/capi/packer/ova/flatcar.json b/images/capi/packer/ova/flatcar.json index ebc1eea33e..d135f9550b 100644 --- a/images/capi/packer/ova/flatcar.json +++ b/images/capi/packer/ova/flatcar.json @@ -5,7 +5,6 @@ "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", "containerd_cri_socket": "/run/docker/libcontainerd/docker-containerd.sock", - "crictl_source_type": "http", "distro_name": "flatcar", "guest_os_type": "flatcar-64", "http_directory": "", diff --git a/images/capi/packer/ova/linux/rockylinux/http/8/ks.cfg.tmpl b/images/capi/packer/ova/linux/almalinux/http/9/ks.cfg.tmpl similarity index 87% rename from images/capi/packer/ova/linux/rockylinux/http/8/ks.cfg.tmpl rename to images/capi/packer/ova/linux/almalinux/http/9/ks.cfg.tmpl index 61c9f80758..a1d720770a 100644 --- a/images/capi/packer/ova/linux/rockylinux/http/8/ks.cfg.tmpl +++ b/images/capi/packer/ova/linux/almalinux/http/9/ks.cfg.tmpl @@ -1,5 +1,6 @@ -# Use CDROM installation media -repo --name="AppStream" --baseurl="http://download.rockylinux.org/pub/rocky/8/AppStream/x86_64/os/" +# Use DVD installation media +repo --name="BaseOS" --baseurl=file:///run/install/repo/BaseOS +repo --name="AppStream" --baseurl=file:///run/install/repo/AppStream cdrom # Use text install @@ -32,11 +33,11 @@ services --enabled="NetworkManager,sshd,chronyd" # System timezone timezone UTC -# System booloader configuration +# System bootloader configuration bootloader --location=mbr --boot-drive=sda zerombr clearpart --all --initlabel --drives=sda -part / 
--fstype="ext4" --grow --asprimary --label=slash --ondisk=sda +autopart --nohome --noswap --nolvm skipx diff --git a/images/capi/packer/ova/linux/centos/http/8/ks.cfg.tmpl b/images/capi/packer/ova/linux/centos/http/8/ks.cfg.tmpl deleted file mode 100644 index 5607fa080e..0000000000 --- a/images/capi/packer/ova/linux/centos/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,75 +0,0 @@ -# version=RHEL8 -# Install OS instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp --device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 -open-vm-tools - -# Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' 
-echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end \ No newline at end of file diff --git a/images/capi/packer/ova/linux/rhel/http/8/ks.cfg.tmpl b/images/capi/packer/ova/linux/rhel/http/8/ks.cfg.tmpl deleted file mode 100644 index 5607fa080e..0000000000 --- a/images/capi/packer/ova/linux/rhel/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,75 +0,0 @@ -# version=RHEL8 -# Install OS instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp --device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 -open-vm-tools - -# Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and 
splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' -echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end \ No newline at end of file diff --git a/images/capi/packer/ova/linux/rockylinux/http/9/ks.cfg.tmpl b/images/capi/packer/ova/linux/rockylinux/http/9/ks.cfg.tmpl index 8c01692a09..7bbe82c577 100644 --- a/images/capi/packer/ova/linux/rockylinux/http/9/ks.cfg.tmpl +++ b/images/capi/packer/ova/linux/rockylinux/http/9/ks.cfg.tmpl @@ -1,6 +1,6 @@ # Use CDROM installation media -repo --name="AppStream" --baseurl="http://download.rockylinux.org/pub/rocky/9/AppStream/x86_64/os/" -repo --name="kickstart" --baseurl="http://download.rockylinux.org/pub/rocky/9/devel/x86_64/kickstart/" +repo --name="AppStream" --baseurl="https://download.rockylinux.org/pub/rocky/9/AppStream/x86_64/os/" +repo --name="kickstart" --baseurl="https://download.rockylinux.org/pub/rocky/9/BaseOS/x86_64/kickstart/" cdrom # Use text install diff --git a/images/capi/packer/ova/packer-node.json b/images/capi/packer/ova/packer-node.json index f189b2c934..cfd3f2d37e 100644 --- a/images/capi/packer/ova/packer-node.json +++ b/images/capi/packer/ova/packer-node.json @@ -463,6 +463,7 @@ "block_nouveau_loading": "{{user `block_nouveau_loading`}}", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", 
"kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -492,6 +493,7 @@ "cdrom_adapter_type": "ide", "cdrom_type": "ide", "cluster": "", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/ova/packer-windows.json b/images/capi/packer/ova/packer-windows.json index 820aeddefb..566c4c61e2 100644 --- a/images/capi/packer/ova/packer-windows.json +++ b/images/capi/packer/ova/packer-windows.json @@ -212,6 +212,7 @@ "vars_inline": { "OS": "{{user `distro_name` | lower}}", "PROVIDER": "ova", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "distribution_version": "{{user `distro_version`}}", "kubernetes_version": "{{user `kubernetes_semver`}}", @@ -243,6 +244,7 @@ "cloudbase_plugins": "cloudbaseinit.plugins.windows.createuser.CreateUserPlugin, cloudbaseinit.plugins.common.setuserpassword.SetUserPasswordPlugin, cloudbaseinit.plugins.common.mtu.MTUPlugin, cloudbaseinit.plugins.common.sethostname.SetHostNamePlugin, cloudbaseinit.plugins.common.networkconfig.NetworkConfigPlugin, cloudbaseinit.plugins.windows.extendvolumes.ExtendVolumesPlugin, cloudbaseinit.plugins.common.ephemeraldisk.EphemeralDiskPlugin, cloudbaseinit.plugins.common.sshpublickeys.SetUserSSHPublicKeysPlugin, cloudbaseinit.plugins.common.userdata.UserDataPlugin, cloudbaseinit.plugins.common.localscripts.LocalScriptsPlugin, cloudbaseinit.plugins.windows.createuser.CreateUserPlugin, cloudbaseinit.plugins.windows.extendvolumes.ExtendVolumesPlugin", "cloudbase_plugins_unattend": "cloudbaseinit.plugins.common.mtu.MTUPlugin", "cloudbase_real_time_clock_utc": "true", + "containerd_image_pull_progress_timeout": null, "containerd_url": "", "containerd_version": 
null, "disable_hypervisor": null, @@ -251,7 +253,7 @@ "http_port_max": "", "http_port_min": "", "ib_version": "{{env `IB_VERSION`}}", - "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user `kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", "kubernetes_http_package_url": "", "kubernetes_typed_version": "kube-{{user `kubernetes_semver`}}", "manifest_output": "manifest.json", diff --git a/images/capi/packer/ova/rhel-8.json b/images/capi/packer/ova/rhel-8.json deleted file mode 100644 index e5ffe440ca..0000000000 --- a/images/capi/packer/ova/rhel-8.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "/8/ks.cfg", - "boot_media_path": "http://{{ .HTTPIP }}:{{ .HTTPPort }}", - "build_name": "rhel-8", - "distro_arch": "amd64", - "distro_name": "rhel", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "rhel8-64", - "http_directory": "./packer/ova/linux/{{user `distro_name`}}/http/", - "iso_checksum": "48f955712454c32718dcde858dea5aca574376a1d7a4b0ed6908ac0b85597811", - "iso_checksum_type": "sha256", - "iso_url": "file:///rhel-8.4-x86_64-dvd.iso", - "os_display_name": "RHEL 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now", - "vsphere_guest_os_type": "rhel8_64Guest" -} diff --git a/images/capi/packer/ova/rockylinux-8.json b/images/capi/packer/ova/rockylinux-8.json deleted file mode 100644 index e18608c3f4..0000000000 --- a/images/capi/packer/ova/rockylinux-8.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "hd:LABEL=cidata:/ks.cfg", - "build_name": "rockylinux-8", - 
"cd_content_location": "./packer/ova/linux/{{user `distro_name`}}/http/{{user `distro_version`}}/*", - "cd_label": "cidata", - "distro_arch": "amd64", - "distro_name": "rockylinux", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "rockylinux-64", - "iso_checksum": "2c735d3b0de921bd671a0e2d08461e3593ac84f64cdaef32e3ed56ba01f74f4b", - "iso_checksum_type": "sha256", - "iso_url": "https://download.rockylinux.org/pub/rocky/8/isos/x86_64/Rocky-8.10-x86_64-minimal.iso", - "os_display_name": "RockyLinux 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "/sbin/halt -h -p", - "vmx_version": "20", - "vsphere_guest_os_type": "rockylinux_64Guest" -} diff --git a/images/capi/packer/ova/rockylinux-9.json b/images/capi/packer/ova/rockylinux-9.json index 078dd30c83..33dc454e27 100644 --- a/images/capi/packer/ova/rockylinux-9.json +++ b/images/capi/packer/ova/rockylinux-9.json @@ -10,9 +10,9 @@ "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-9", "firmware": "efi", "guest_os_type": "rockylinux-64", - "iso_checksum": "ee3ac97fdffab58652421941599902012179c37535aece76824673105169c4a2", + "iso_checksum": "aed9449cf79eb2d1c365f4f2561f923a80451b3e8fdbf595889b4cf0ac6c58b8", "iso_checksum_type": "sha256", - "iso_url": "https://dl.rockylinux.org/vault/rocky/9.4/isos/x86_64/Rocky-9.4-x86_64-minimal.iso", + "iso_url": "https://dl.rockylinux.org/vault/rocky/9.6/isos/x86_64/Rocky-9.6-x86_64-minimal.iso", "os_display_name": "RockyLinux 9", "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm", "shutdown_command": "/sbin/halt -h -p", diff --git a/images/capi/packer/ova/ubuntu-2404-efi.json b/images/capi/packer/ova/ubuntu-2404-efi.json index c6ace6cefb..a75d8c322f 100644 --- a/images/capi/packer/ova/ubuntu-2404-efi.json +++ b/images/capi/packer/ova/ubuntu-2404-efi.json @@ -10,9 +10,9 @@ 
"firmware": "efi", "floppy_dirs": "", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "vsphere_guest_os_type": "ubuntu64Guest" diff --git a/images/capi/packer/ova/ubuntu-2404.json b/images/capi/packer/ova/ubuntu-2404.json index 17c3c9a4c5..1af3a9af96 100644 --- a/images/capi/packer/ova/ubuntu-2404.json +++ b/images/capi/packer/ova/ubuntu-2404.json @@ -9,9 +9,9 @@ "distro_version": "24.04", "floppy_dirs": "", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "vsphere_guest_os_type": "ubuntu64Guest" diff --git a/images/capi/packer/powervs/centos-8.json b/images/capi/packer/powervs/centos-8.json deleted file mode 100644 index eb1c1a2953..0000000000 --- a/images/capi/packer/powervs/centos-8.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "build_name": "centos-streams8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "source_cos_bucket": "power-oss-bucket", - "source_cos_object": "centos-streams-8.ova.gz", - "source_cos_region": "us-south", - "ssh_username": "root" -} diff --git 
a/images/capi/packer/powervs/packer.json b/images/capi/packer/powervs/packer.json index fb1b779f91..e8e8c2a5a6 100644 --- a/images/capi/packer/powervs/packer.json +++ b/images/capi/packer/powervs/packer.json @@ -74,7 +74,8 @@ "capture_cos_secret_key": "", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", - "containerd_service_url": "null", + "containerd_image_pull_progress_timeout": null, + "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, "dhcp_network": "false", diff --git a/images/capi/packer/proxmox/flatcar.json b/images/capi/packer/proxmox/flatcar.json index ae1a3e2606..e1d304cff1 100644 --- a/images/capi/packer/proxmox/flatcar.json +++ b/images/capi/packer/proxmox/flatcar.json @@ -6,7 +6,6 @@ "boot_wait": "180s", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distribution_version": "{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "distro_name": "flatcar", "guest_os_type": "linux-64", diff --git a/images/capi/packer/proxmox/packer.json.tmpl b/images/capi/packer/proxmox/packer.json.tmpl index 8a03445114..e26a764da4 100644 --- a/images/capi/packer/proxmox/packer.json.tmpl +++ b/images/capi/packer/proxmox/packer.json.tmpl @@ -154,6 +154,7 @@ "OS": "{{user `distro_name` | lower}}", "OS_VERSION": "{{user `distribution_version` | lower}}", "PROVIDER": "qemu", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -186,6 +187,7 @@ "bridge": "{{env `PROXMOX_BRIDGE`}}", "build_timestamp": "{{timestamp}}", "containerd_version": 
null, + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "runc_version": null, "cores": "2", @@ -231,3 +233,4 @@ "scsi_controller": "virtio-scsi-pci" } } + diff --git a/images/capi/packer/proxmox/ubuntu-2404-efi.json b/images/capi/packer/proxmox/ubuntu-2404-efi.json index 065623fd6a..c5cac85d92 100644 --- a/images/capi/packer/proxmox/ubuntu-2404-efi.json +++ b/images/capi/packer/proxmox/ubuntu-2404-efi.json @@ -4,10 +4,10 @@ "build_name": "ubuntu-2404-efi", "distribution_version": "2404", "distro_name": "ubuntu", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", "iso_file": "{{env `ISO_FILE`}}", - "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "unmount_iso": "true", "version": "24.04" diff --git a/images/capi/packer/proxmox/ubuntu-2404.json b/images/capi/packer/proxmox/ubuntu-2404.json index 1ca5bca0f4..ed1196d446 100644 --- a/images/capi/packer/proxmox/ubuntu-2404.json +++ b/images/capi/packer/proxmox/ubuntu-2404.json @@ -3,10 +3,10 @@ "build_name": "ubuntu-2404", "distribution_version": "2404", "distro_name": "ubuntu", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", "iso_file": "{{env `ISO_FILE`}}", - "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "source_image": "ubuntu-20-04-x64", "unmount_iso": "true", 
diff --git a/images/capi/packer/qemu/linux/centos/http/8/ks.cfg.tmpl b/images/capi/packer/qemu/linux/centos/http/8/ks.cfg.tmpl deleted file mode 100644 index 6dfed57404..0000000000 --- a/images/capi/packer/qemu/linux/centos/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,74 +0,0 @@ -# version=RHEL8 -# Install OS instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp --device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 - -# Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' -echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y 
clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end diff --git a/images/capi/packer/qemu/linux/rhel/http/8/ks.cfg.tmpl b/images/capi/packer/qemu/linux/rhel/http/8/ks.cfg.tmpl deleted file mode 100644 index 6dfed57404..0000000000 --- a/images/capi/packer/qemu/linux/rhel/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,74 +0,0 @@ -# version=RHEL8 -# Install OS instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp --device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 - -# Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" 
/mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' -echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end diff --git a/images/capi/packer/qemu/linux/rockylinux/http/8/ks.cfg.tmpl b/images/capi/packer/qemu/linux/rockylinux/http/8/ks.cfg.tmpl deleted file mode 100644 index b8f8480f5c..0000000000 --- a/images/capi/packer/qemu/linux/rockylinux/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,95 +0,0 @@ -# Use CDROM installation media -repo --name="AppStream" --baseurl="http://download.rockylinux.org/pub/rocky/8/AppStream/x86_64/os/" -cdrom - -# Use text install -text - -# Don't run the Setup Agent on first boot -firstboot --disabled -eula --agreed - -# Keyboard layouts -keyboard --vckeymap=us --xlayouts='us' - -# System language -lang en_US.UTF-8 - -# Network information -network --bootproto=dhcp --onboot=on --ipv6=auto --activate --hostname=capv.vm - -# Lock Root account -rootpw --lock - -# Create builder user -user --name=builder --groups=wheel --password=$SSH_PASSWORD --plaintext --shell=/bin/bash - -# System services -selinux --permissive -firewall --disabled -services --enabled="NetworkManager,sshd,chronyd" - -# System timezone -timezone UTC - -# System booloader configuration -bootloader --location=mbr --boot-drive=sda -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -skipx - -%packages --ignoremissing --excludedocs -openssh-server -sudo -sed -python3 - -# unnecessary firmware --aic94xx-firmware --atmel-firmware --b43-openfwwf --bfa-firmware --ipw2100-firmware --ipw2200-firmware --ivtv-firmware --iwl*-firmware --libertas-usb8388-firmware --ql*-firmware --rt61pci-firmware --rt73usb-firmware --xorg-x11-drv-ati-firmware --zd1211-firmware 
--cockpit --quota --alsa-* --fprintd-pam --intltool --microcode_ctl -%end - -%addon com_redhat_kdump --disable -%end - -reboot - -%post - -echo 'builder ALL=(ALL) NOPASSWD: ALL' >/etc/sudoers.d/builder -chmod 440 /etc/sudoers.d/builder - -# Remove the package cache -yum -y clean all - -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -systemctl enable vmtoolsd -systemctl start vmtoolsd - -# Ensure on next boot that network devices get assigned unique IDs. -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end diff --git a/images/capi/packer/qemu/packer.json.tmpl b/images/capi/packer/qemu/packer.json.tmpl index 7f7dacfaa1..76524d47aa 100644 --- a/images/capi/packer/qemu/packer.json.tmpl +++ b/images/capi/packer/qemu/packer.json.tmpl @@ -12,7 +12,7 @@ "{{user `cd_files`}}" ], "cd_label": "cidata", - "cpu_model": "host", + "cpu_model": "{{user `cpu_model`}}", "cpus": "{{user `cpus`}}", "disk_compression": "{{ user `disk_compression`}}", "disk_discard": "{{user `disk_discard`}}", @@ -25,6 +25,7 @@ "http_directory": "{{user `http_directory`}}", "iso_checksum": "{{user `iso_checksum_type`}}:{{user `iso_checksum`}}", "iso_url": "{{user `iso_url`}}", + "machine_type": "{{user `machine_type`}}", "memory": "{{user `memory`}}", "net_device": "virtio-net", "output_directory": "{{user `output_directory`}}", @@ -141,6 +142,7 @@ "OS": "{{user `distro_name` | lower}}", "OS_VERSION": "{{user `distribution_version` | lower}}", "PROVIDER": "qemu", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -182,6 +184,8 @@ "build_timestamp": "{{timestamp}}", "cd_files": "linux/base/*.nothing", "containerd_version": null, + "containerd_image_pull_progress_timeout": null, + "cpu_model": 
"host", "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "runc_version": null, "cpus": "1", @@ -213,6 +217,7 @@ "kubernetes_series": null, "kubernetes_source_type": null, "machine_id_mode": "444", + "machine_type": "pc", "memory": "2048", "oem_id": "", "output_directory": "./output/{{user `build_name`}}-kube-{{user `kubernetes_semver`}}", diff --git a/images/capi/packer/qemu/qemu-centos-9.json b/images/capi/packer/qemu/qemu-centos-9.json index 1a0a0b6b5b..a68e041ae7 100644 --- a/images/capi/packer/qemu/qemu-centos-9.json +++ b/images/capi/packer/qemu/qemu-centos-9.json @@ -10,9 +10,9 @@ "distro_version": "9", "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-9", "guest_os_type": "centos9-64", - "iso_checksum": "01126d2baac31f520e5b6f20ef0a2d8f2de26c8ffdebbe3ddd0eea99f2c7a765", + "iso_checksum": "2282a8ea8b98188d30958b2274548394cc854e0eb64d25abefc65b1d44e2aebf", "iso_checksum_type": "sha256", - "iso_url": "https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/iso/CentOS-Stream-9-20240304.0-x86_64-dvd1.iso", + "iso_url": "https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/iso/CentOS-Stream-9-20260209.0-x86_64-dvd1.iso", "os_display_name": "CentOS 9 Stream", "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm", "shutdown_command": "/sbin/halt -h -p" diff --git a/images/capi/packer/qemu/qemu-flatcar.json b/images/capi/packer/qemu/qemu-flatcar.json index 786c25d2ef..4c129c983f 100644 --- a/images/capi/packer/qemu/qemu-flatcar.json +++ b/images/capi/packer/qemu/qemu-flatcar.json @@ -5,7 +5,6 @@ "boot_wait": "180s", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distribution_version": "{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "distro_name": "flatcar", "guest_os_type": "linux-64", diff 
--git a/images/capi/packer/qemu/qemu-rhel-8.json b/images/capi/packer/qemu/qemu-rhel-8.json deleted file mode 100644 index 5e6f9f5af3..0000000000 --- a/images/capi/packer/qemu/qemu-rhel-8.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "/8/ks.cfg", - "build_name": "rhel-8", - "distribution_version": "8", - "distro_name": "rhel", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "RedHat_64", - "iso_checksum": "48f955712454c32718dcde858dea5aca574376a1d7a4b0ed6908ac0b85597811", - "iso_checksum_type": "sha256", - "iso_url": "rhel-8.4-x86_64-dvd.iso", - "os_display_name": "RHEL 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now" -} diff --git a/images/capi/packer/qemu/qemu-rockylinux-8-cloudimg.json b/images/capi/packer/qemu/qemu-rockylinux-8-cloudimg.json deleted file mode 100644 index 8c204d7a72..0000000000 --- a/images/capi/packer/qemu/qemu-rockylinux-8-cloudimg.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "build_name": "rockylinux-8", - "cd_files": "./packer/qemu/cloud-init/*", - "disk_image": "true", - "distribution_version": "8", - "distro_arch": "amd64", - "distro_name": "rockylinux", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "centos8-64", - "iso_checksum": "https://dl.rockylinux.org/pub/rocky/8/images/x86_64/CHECKSUM", - "iso_checksum_type": "file", - "iso_url": "https://dl.rockylinux.org/pub/rocky/8/images/x86_64/Rocky-8-GenericCloud-Base.latest.x86_64.qcow2", - "os_display_name": "RockyLinux 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "/sbin/halt -h -p" -} diff --git a/images/capi/packer/qemu/qemu-rockylinux-8.json b/images/capi/packer/qemu/qemu-rockylinux-8.json deleted file mode 100644 
index 9453c9944b..0000000000 --- a/images/capi/packer/qemu/qemu-rockylinux-8.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "/8/ks.cfg", - "build_name": "rockylinux-8", - "distribution_version": "8", - "distro_arch": "amd64", - "distro_name": "rockylinux", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "centos8-64", - "iso_checksum": "06019fd7c4f956b2b0ed37393e81c577885e4ebd518add249769846711a09dc4", - "iso_checksum_type": "sha256", - "iso_url": "https://download.rockylinux.org/pub/rocky/8/isos/x86_64/Rocky-8.9-x86_64-minimal.iso", - "os_display_name": "RockyLinux 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "/sbin/halt -h -p" -} diff --git a/images/capi/packer/qemu/qemu-ubuntu-2404-efi.json b/images/capi/packer/qemu/qemu-ubuntu-2404-efi.json index de66ad7ec9..c57b05fecc 100644 --- a/images/capi/packer/qemu/qemu-ubuntu-2404-efi.json +++ b/images/capi/packer/qemu/qemu-ubuntu-2404-efi.json @@ -5,9 +5,9 @@ "distro_name": "ubuntu", "firmware": "OVMF.fd", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" diff --git a/images/capi/packer/qemu/qemu-ubuntu-2404.json b/images/capi/packer/qemu/qemu-ubuntu-2404.json index e5d24835c5..9ef9e8d198 100644 --- a/images/capi/packer/qemu/qemu-ubuntu-2404.json +++ b/images/capi/packer/qemu/qemu-ubuntu-2404.json @@ -4,9 +4,9 @@ "distribution_version": "2404", 
"distro_name": "ubuntu", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" diff --git a/images/capi/packer/raw/linux/rhel/http/8/ks.cfg.tmpl b/images/capi/packer/raw/linux/rhel/http/8/ks.cfg.tmpl deleted file mode 100644 index 6dfed57404..0000000000 --- a/images/capi/packer/raw/linux/rhel/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,74 +0,0 @@ -# version=RHEL8 -# Install OS instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp --device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 - -# 
Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' -echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end diff --git a/images/capi/packer/raw/packer.json.tmpl b/images/capi/packer/raw/packer.json.tmpl index 512f5e0d11..652322341a 100644 --- a/images/capi/packer/raw/packer.json.tmpl +++ b/images/capi/packer/raw/packer.json.tmpl @@ -134,6 +134,7 @@ "PROVIDER": "raw", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -142,7 +143,8 @@ "kubernetes_deb_version": "{{ user `kubernetes_deb_version` }}", "kubernetes_rpm_version": "{{ split (user `kubernetes_rpm_version`) \"-\" 0 }}", "kubernetes_source_type": "{{user `kubernetes_source_type`}}", - "kubernetes_version": "{{user `kubernetes_semver` | replace \"v\" \"\" 1}}" + "kubernetes_version": "{{user `kubernetes_semver` | replace \"v\" \"\" 1}}", + "extra_kernel_boot_params": "{{ user `extra_kernel_boot_params` }}" }, "version": "{{user `goss_version`}}" } @@ -159,6 +161,7 @@ "containerd_gvisor_runtime": "false", 
"containerd_gvisor_version": "latest", "containerd_version": null, + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "runc_version": null, "cpus": "1", diff --git a/images/capi/packer/raw/raw-flatcar.json b/images/capi/packer/raw/raw-flatcar.json index c499489e88..c6f8aa6704 100644 --- a/images/capi/packer/raw/raw-flatcar.json +++ b/images/capi/packer/raw/raw-flatcar.json @@ -5,7 +5,6 @@ "boot_wait": "180s", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distro_name": "flatcar", "guest_os_type": "linux-64", "http_directory": "./packer/files/flatcar/ignition/", diff --git a/images/capi/packer/raw/raw-rhel-8.json b/images/capi/packer/raw/raw-rhel-8.json deleted file mode 100644 index e1f1735c7a..0000000000 --- a/images/capi/packer/raw/raw-rhel-8.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "/8/ks.cfg", - "build_name": "rhel-8", - "build_target": "raw", - "distro_name": "rhel", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "RedHat_64", - "iso_checksum": "a6a7418a75d721cc696d3cbdd648b5248808e7fef0f8742f518e43b46fa08139", - "iso_checksum_type": "sha256", - "iso_url": "file:///rhel-8.7-x86_64-dvd.iso", - "os_display_name": "RHEL 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now" -} diff --git a/images/capi/packer/raw/raw-ubuntu-2404-efi.json b/images/capi/packer/raw/raw-ubuntu-2404-efi.json index 9851777f1f..2c4cdf6d04 100644 --- a/images/capi/packer/raw/raw-ubuntu-2404-efi.json +++ b/images/capi/packer/raw/raw-ubuntu-2404-efi.json @@ -8,9 +8,9 @@ "distro_version_slug": "2404", "firmware": 
"OVMF.fd", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" diff --git a/images/capi/packer/raw/raw-ubuntu-2404.json b/images/capi/packer/raw/raw-ubuntu-2404.json index b376441745..529aea8e56 100644 --- a/images/capi/packer/raw/raw-ubuntu-2404.json +++ b/images/capi/packer/raw/raw-ubuntu-2404.json @@ -7,9 +7,9 @@ "distro_version": "24.04", "distro_version_slug": "2404", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" diff --git a/images/capi/packer/scaleway/packer.json b/images/capi/packer/scaleway/packer.json index e9838f8596..c091dfef36 100644 --- a/images/capi/packer/scaleway/packer.json +++ b/images/capi/packer/scaleway/packer.json @@ -58,6 +58,7 @@ "PROVIDER": "scaleway", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", 
"kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -80,6 +81,8 @@ "commercial_type": "DEV1-S", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, + "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, "existing_ansible_ssh_args": "{{env `ANSIBLE_SSH_ARGS`}}", diff --git a/images/capi/packer/vultr/packer.json b/images/capi/packer/vultr/packer.json index c311927a51..208c4353a7 100644 --- a/images/capi/packer/vultr/packer.json +++ b/images/capi/packer/vultr/packer.json @@ -39,6 +39,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/scripts/ci-azure-e2e.sh b/images/capi/scripts/ci-azure-e2e.sh index 4aa39762cb..f80065a329 100755 --- a/images/capi/scripts/ci-azure-e2e.sh +++ b/images/capi/scripts/ci-azure-e2e.sh @@ -66,11 +66,20 @@ fi set -o nounset get_random_region() { - local REGIONS=("australiaeast" "canadacentral" "eastus" "eastus2" "northcentralus" "northeurope" "uksouth" "westeurope" "westus2") + # Regions appear more than once to represent the approximate relative amount + # of Standard BS v2 quota in each region. 
+ local REGIONS=( + "australiaeast" + "canadacentral" "canadacentral" "canadacentral" + "francecentral" + "germanywestcentral" + "switzerlandnorth" "switzerlandnorth" "switzerlandnorth" + "uksouth" + ) echo "${REGIONS[${RANDOM} % ${#REGIONS[@]}]}" } -export VALID_CVM_LOCATIONS=("eastus" "northeurope" "westeurope" "westus") +export VALID_CVM_LOCATIONS=("eastus" "germanywestcentral" "northeurope" "switzerlandnorth" "uksouth" "westeurope" "westus") get_random_cvm_region() { echo "${VALID_CVM_LOCATIONS[${RANDOM} % ${#VALID_CVM_LOCATIONS[@]}]}" }